Code Optimization
- No new functionality added; the only goal is to make the code at least look cleaner to me
- Configure adblock for Firefox and load it through a Firefox profile, so fewer ads get loaded (a sketch of preparing such a profile follows this list)
- Cut down on near-duplicate code as much as possible
- Use subroutines wherever possible to improve maintainability
- Launch one Firefox instance instead of two
- Lay the groundwork for the next step, a multithreaded mode (see the sketch at the end of the post)
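The script below only loads an already prepared Firefox profile from disk; how the ad blocker got into that profile is not part of the code. As a minimal sketch, assuming an Adblock Plus .xpi file has been downloaded locally (the path below is just a placeholder), such a profile could be built with Selenium's FirefoxProfile API:

# Minimal sketch, not part of the original script: attach a locally downloaded
# ad-blocking extension to a fresh Firefox profile. The .xpi path is a placeholder.
from selenium import webdriver

adblock_profile = webdriver.FirefoxProfile()
adblock_profile.add_extension(extension='/path/to/adblockplus.xpi')
adblock_browser = webdriver.Firefox(adblock_profile)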
#coding=utf-8
from selenium import webdriver
import time
import os
import re
import sys
from selenium.common.exceptions import NoSuchElementException
reload(sys)
sys.setdefaultencoding('utf8')
#######################
# Fetch one stock detail page and append one "~"-separated line to the output file.
# title, code and myre are module-level globals defined below, before the first call.
def getinfo(mydriver,g_myfile,gourl):
    linetext=""
    mydriver.get(gourl)
    try:
        gupiaoming=mydriver.find_element_by_xpath(title).text
        gupiaocode=mydriver.find_element_by_xpath(code).text
        hexinshuju=mydriver.find_element_by_class_name('pad5')
        shujuhang=hexinshuju.find_elements_by_tag_name('tr')
        # Walk the data rows, keeping the value part of each "label:value" cell.
        for i in range(len(shujuhang)-2):
            shujulie=shujuhang[i].find_elements_by_tag_name('td')
            tmpshuju=myre.split(shujulie[0].text)
            linetext=linetext+"~"+tmpshuju[1]
        shuju=myre.split(shujuhang[8].text)
        linetext=linetext+"~"+shuju[1]
        tmpshuju=myre.split(shujuhang[9].text)
        linetext=linetext+"~"+tmpshuju[1]
        linetext="%s~%s%s"%(gupiaoming,gupiaocode,linetext)
        print "writing data:",linetext
        g_myfile.write("%s\n"%(linetext))
    except NoSuchElementException:
        # Index pages and other non-stock links lack these elements; skip them.
        print "not a stock page"
# Open the output file and fetch every URL in the list with the shared browser.
def saveinfob(s_filename,s_list):
    s_myfile=open(s_filename,'w')
    for s_myurl in s_list:
        print s_myurl
        if s_myurl is not None:
            getinfo(br,s_myfile,s_myurl)
    s_myfile.close()
# Collect the href attribute of every link element in the list.
def getlinkurl(linklist):
    my=[]
    for x in linklist:
        my.append(x.get_attribute('href'))
    return my
# Load the stock list page and return the Shanghai and Shenzhen link lists.
def geturls(mydriver):
    mydriver.get("http://quote.eastmoney.com/stocklist.html")
    shxpath="/html/body/div[9]/div[2]/div/ul[1]"
    szxpath="/html/body/div[9]/div[2]/div/ul[2]"
    shgupiao=mydriver.find_element_by_xpath(shxpath)
    szgupiao=mydriver.find_element_by_xpath(szxpath)
    shgupiaolist=shgupiao.find_elements_by_tag_name('a')
    szgupiaolist=szgupiao.find_elements_by_tag_name('a')
    sh=getlinkurl(shgupiaolist)
    sz=getlinkurl(szgupiaolist)
    return sh,sz
######################
# Firefox profile directory (the profile is expected to already contain the adblock setup)
profileDir = "/Users/pro/Desktop/mystudy/python"
profile = webdriver.FirefoxProfile(profileDir)
# Launch a single Firefox instance with that profile
br=webdriver.Firefox(profile)
# Collect all stock page URLs (Shanghai and Shenzhen)
(sh,sz)=geturls(br)
# XPaths for the stock name and code; used as globals inside getinfo()
title='//*[@id="name"]'
code='//*[@id="code"]'
# Not used in this version; getinfo() locates the core-data block by class name 'pad5' instead
hexinshujuxpath="/html/body/div[14]/div[1]/div[4]/div[1]"
# Regex used to split each cell's "label:value" text at the colon
restr=":".decode('utf8')
myre=re.compile(restr,re.I|re.M|re.S)
# Fetch the data and save it
saveinfob('tmpshgupiaodata.txt',sh)
saveinfob('tmpszgupiaodata.txt',sz)
br.quit()
print "data download complete"