I actually finished this program quite a while ago but never got around to posting it; since things haven't been too busy lately, I'm sharing it with everyone now.
It is implemented with the BeautifulSoup and urllib2 modules, and saving to Word is done with the python-docx module. Installation instructions for all of these are easy to find online, so I won't repeat them here.
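For reference, here is a minimal sketch of the python-docx calls the script below relies on; the title, text, image path, and output name are placeholders of mine, not values from the script:

from docx import Document
from docx.shared import Inches

doc = Document()
doc.add_heading('A question title', level=3)  # each question becomes a level-3 heading
doc.add_paragraph('The answer text goes here.')
doc.add_picture('some_image.png', width=Inches(1.25))  # pictures scaled to 1.25 inches wide
doc.save('demo.docx')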
The main functionality: log in to Zhihu, fetch the questions and answers from your personal collections, and save them as a Word document so you can read them when you're offline. Images inside answers are fetched as well, though that part is still a bit buggy; I'll fix it up when I have time.
Also, my regex usage here is downright awful... I'm a bit embarrassed by it myself...
One more thing: right now every answer under a collected question gets saved. When I have time I'll change it to save only the first answer, or only the answer shown on the collection page; otherwise, if you've collected a lot, the size of the resulting Word file may give you quite a fright. O(∩_∩)O haha~
A captcha may be required at login. If you're prompted for one, you'll find the captcha image in the program's folder; just type in what it shows and you're good to go.
# -*- coding: utf-8 -*-
# Log in to Zhihu, scrape personal collections and save them as a Word document
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import urllib
import urllib2
import cookielib
import string
import re
from bs4 import BeautifulSoup
from docx import Document
from docx import *
from docx.shared import Inches
from sys import exit
import os
# A SOCKS proxy is needed when running this from the office network
#import socks
#import socket
#socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 8088)
#socket.socket = socks.socksocket
loginurl = 'http://www.zhihu.com/login'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.116 Safari/537.36'}
postdata = {
    '_xsrf': 'acab9d276ea217226d9cc94a84a231f7',
    'email': '',
    'password': '',
    'rememberme': 'y'
}
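# Note: the _xsrf value above is a snapshot and will eventually go stale. A sketch
# of fetching a fresh token instead, assuming the login page still carries a
# hidden <input name="_xsrf"> (my suggestion, uncomment to use):
#def get_xsrf():
#    page = urllib2.urlopen(loginurl).read()
#    xsrfinput = BeautifulSoup(page).find('input', {'name': '_xsrf'})
#    return xsrfinput.get('value') if xsrfinput is not None else ''
#postdata['_xsrf'] = get_xsrf()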
if not os.path.exists('myimg'):
    os.mkdir('myimg')
if os.path.exists('123.docx'):
    os.remove('123.docx')
if os.path.exists('checkcode.gif'):
    os.remove('checkcode.gif')
mydoc = Document()
questiontitle = ''
#----------------------------------------------------------------------
def dealimg(imgcontent):
    soup = BeautifulSoup(imgcontent)
    try:
        for imglink in soup.findAll('img'):
            if imglink is not None:
                myimg = imglink.get('src')
                #print myimg
                if myimg.find('http') >= 0:
                    imgsrc = urllib2.urlopen(myimg).read()
                    imgnamere = re.compile(r'http\S*/')
                    imgname = imgnamere.sub('', myimg)
                    #print imgname
                    with open(u'myimg/' + imgname, 'wb') as code:
                        code.write(imgsrc)
                    mydoc.add_picture(u'myimg/' + imgname, width=Inches(1.25))
    except:
        pass
    # Drop the <img> tags (the pictures were already written into the document above),
    # turn <br> tags into newlines, then strip whatever markup remains
    strinfo = re.compile(r'<img[^>]*>')
    imgcontent = strinfo.sub('', imgcontent)
    imgcontent = re.sub(r'<br\s*/?>', '\n', imgcontent)
    imgcontent = re.sub(r'<[^>]+>', '', imgcontent)
    return imgcontent
def enterquestionpage(pageurl):
    html = urllib2.urlopen(pageurl).read()
    soup = BeautifulSoup(html)
    questiontitle = soup.title.string
    mydoc.add_heading(questiontitle, level=3)
    for div in soup.findAll('div', {'class': 'fixed-summary zm-editable-content clearfix'}):
        #print div
        conent = str(div).decode('utf-8')
        conent = dealimg(conent)
        ### This part got way too messy; I should look for a proper HTML-handling module when I have time
        mydoc.add_paragraph(conent, style='BodyText3')
        """file = open('222.txt', 'a')
        file.write(str(conent))
        file.close()"""
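# A cleaner route than all the regex stripping above would be to let bs4 pull the
# text out directly; a sketch of my suggestion, not wired in (note it would skip
# the image downloading that dealimg does):
#def htmltotext(fragment):
#    # get_text('\n') joins the text of all child nodes with newlines
#    return BeautifulSoup(fragment).get_text('\n')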
def entercollectpage(pageurl):
    html = urllib2.urlopen(pageurl).read()
    soup = BeautifulSoup(html)
    for div in soup.findAll('div', {'class': 'zm-item'}):
        h2content = div.find('h2', {'class': 'zm-item-title'})
        #print h2content
        if h2content is not None:
            link = h2content.find('a')
            mylink = link.get('href')
            questionlink = 'http://www.zhihu.com' + mylink
            enterquestionpage(questionlink)
            print questionlink
def loginzhihu():
    postdatastr = urllib.urlencode(postdata)
    '''
    cj = cookielib.LWPCookieJar()
    cookie_support = urllib2.HTTPCookieProcessor(cj)
    opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
    urllib2.install_opener(opener)
    '''
    h = urllib2.urlopen(loginurl)
    request = urllib2.Request(loginurl, postdatastr, headers)
    response = urllib2.urlopen(request)
    #print response.geturl()
    text = response.read()
    collecturl = 'http://www.zhihu.com/collections'
    req = urllib2.urlopen(collecturl)
    # A redirect back to the login page means the session is not authenticated
    if str(req.geturl()) == 'http://www.zhihu.com/?next=%2Fcollections':
        print 'login fail!'
        return
    txt = req.read()
    soup = BeautifulSoup(txt)
    count = 0
    divs = soup.findAll('div', {'class': 'zm-item'})
    if not divs:
        print 'login fail!'
        return
    print 'login ok!\n'
    for div in divs:
        link = div.find('a')
        mylink = link.get('href')
        collectlink = 'http://www.zhihu.com' + mylink
        entercollectpage(collectlink)
        print collectlink
        # Used while testing to fetch only the first collection
        #count += 1
        #if count == 1:
        #    return
def getcheckcode(thehtml):
    soup = BeautifulSoup(thehtml)
    div = soup.find('div', {'class': 'js-captcha captcha-wrap'})
    if div is not None:
        #print div
        imgsrc = div.find('img')
        imglink = imgsrc.get('src')
        if imglink is not None:
            imglink = 'http://www.zhihu.com' + imglink
            imgcontent = urllib2.urlopen(imglink).read()
            # Save the captcha next to the script so the user can read it off
            with open('checkcode.gif', 'wb') as code:
                code.write(imgcontent)
            return True
        else:
            return False
    return False
if __name__ == '__main__':
    import getpass
    username = raw_input('input username:')
    password = getpass.getpass('Enter password: ')
    postdata['email'] = username
    postdata['password'] = password
    postdatastr = urllib.urlencode(postdata)
    # Install a cookie-aware opener so the login session survives across requests
    cj = cookielib.LWPCookieJar()
    cookie_support = urllib2.HTTPCookieProcessor(cj)
    opener = urllib2.build_opener(cookie_support, urllib2.HTTPHandler)
    urllib2.install_opener(opener)
    h = urllib2.urlopen(loginurl)
    request = urllib2.Request(loginurl, postdatastr, headers)
    response = urllib2.urlopen(request)
    txt = response.read()
    if getcheckcode(txt):
        checkcode = raw_input('input checkcode:')
        postdata['captcha'] = checkcode
        loginzhihu()
        mydoc.save('123.docx')
    else:
        loginzhihu()
        mydoc.save('123.docx')
    print 'the end'
    raw_input()
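Running it is straightforward. Assuming you save the script as, say, zhihu2word.py (the file name is up to you):

python zhihu2word.py

Enter your username and password at the prompts, plus the captcha if one appears, and everything lands in 123.docx next to the script.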
Well, that's about it. If you have any good suggestions or anything else, leave a comment below and I'll reply as soon as I can; or grab my contact info from the site's About page and reach me directly.