Python爬取国外天气预报网站的方法
admin
2023-07-31 02:25:56
0

本文实例讲述了Python爬取国外天气预报网站的方法。分享给大家供大家参考。具体如下:

crawl_weather.py如下:

#encoding=utf-8
import httplib
import urllib2
import time
from threading import Thread
import threading
from Queue import Queue
from time import sleep
import re
import copy
lang = \"fr\"
count = 0
class Location:
  """A geographic entry discovered while crawling the weather site.

  Example usages (from the original author's notes):
      Location(False, "China", "Beijing", "zh")   # a city within a country
      Location(True, "", "Asia", "zh")            # a region above country level
  """
  def __init__(self, is_beyond_country, country_name, loc_name, lang):
    # True when the entry is a supra-national region (e.g. a continent);
    # in that case country_name may be empty.
    self.is_beyond_country = is_beyond_country
    # Country the location belongs to (may be empty for regions).
    self.country_name = country_name
    # Display name of the location itself.
    self.loc_name = loc_name
    # Language code the name is expressed in (e.g. "zh", "fr").
    self.lang = lang
# Reentrant lock serializing console progress output across crawler threads
# (acquired around the periodic "count" print in GetLocationURLs).
prn_lock = threading.RLock()
def GetLocationURLs(url, recursive):
  global count
  if url.find(\"weather-forecast\") != -1:
    count = count + 1
    if count % 500 == 0:
      prn_lock.acquire()
      print \"count:%d\" % (count)
      prn_lock.release()
    return [url]
  page = urllib2.urlopen(url).read()
  time.sleep(0.01)
  #\"
Africa
\" pattern = \"
(.*)
\" locs = re.findall(pattern, page) locs = [(url, name) for url, name in locs if url.find(\"browse-locations\") != -1 or url.find(\"weather-forecast\") != -1] if not recursive: urls = [url for url, name in locs] return urls urls = [] for _url, _name in locs: lst = GetLocationURLs(_url, True) urls.extend(lst) return urls #entry_url = \"http://www.accuweather.com/zh/browse-locations\" entry_url = \"http://www.accuweather.com/%s/browse-locations/eur/fr\" % (lang) #regions = [\"afr\", \"ant\", \"arc\", \"asi\", \"cac\", \"eur\", \"mea\", \"nam\", \"ocn\", \"sam\"] #regions = [\"eur\"] #region_urls = [ \"%s/%s\" % (entry_url, reg) for reg in regions] #region_urls = [\"http://www.accuweather.com/zh/browse-locations/eur/fr\"] sub_urls = GetLocationURLs(entry_url, False) print len(sub_urls) print sub_urls q = Queue() location_urls = [] ThreadNum = 5 lock = threading.RLock() for url in sub_urls: q.put(url) def working(): while True: url = q.get() lst = GetLocationURLs(url, True) print \"%s %d urls \" % (url, len(lst)) lock.acquire() location_urls.extend(lst) lock.release() q.task_done() for i in range(ThreadNum): t = Thread(target=working) t.setDaemon(True) t.start() q.join() fp = open(\'locations.txt\', \"w\") fp.write(\"\\n\".join(location_urls)) fp.close() #for url in location_urls: # print url #location_urls = GetLocationURLs(entry_url) \'\'\' def Fetch(url): try: print url web_path = url[0] local_name = url[1] print \"web_path:\", web_path print \"local_name:\", local_name sContent = urllib2.urlopen(web_path).read() savePath = \"D:\\\\Course\\\\NLP_Manning\\\\%s\" % (local_name) print savePath file = open(savePath,\'wb\') file.write(sContent) file.close() print savePath + \" saved\"; except: pass; def working(): while True: url = q.get() Fetch(url) sleep(10) q.task_done() #root_url = \"https://class.coursera.org/nlp/lecture/index?lecture_player=flash\" root_url = \"https://class.coursera.org/nlp/lecture/index?lecture_player=flash\" page = 
urllib2.urlopen(root_url).read() for i in range(NUM): t = Thread(target=working) t.setDaemon(True) t.start() urls = copy.deepcopy(ppt_urls) urls.extend(srt_urls) urls.extend(video_urls) print len(ppt_urls) print len(srt_urls) print len(video_urls) print len(urls) for url in urls: q.put(url) q.join() \'\'\' \'\'\' root_url = \"http://www.accuweather.com/zh/cn/andingmen/57494/weather-forecast/57494\" page = urllib2.urlopen(root_url).read() print page \'\'\'

FetchLocation.py如下:

#encoding=utf-8
import sys
import httplib
import urllib2
import time
from threading import Thread
import threading
from Queue import Queue
from time import sleep
import re
import copy
from xml.dom import minidom
import HTMLParser
import datetime
# Work queue of URL batches consumed by the worker threads.
q = Queue()
# Two reentrant locks; locks[1] guards the shared result maps below
# (locks[0] appears unused in the visible code — TODO confirm).
locks = [threading.RLock() for i in range(2)]
# Number of worker threads to spawn.
ThreadNumber = 20
# Shared result maps (used as sets: key -> 1): location names and
# weather-condition strings collected from the fetched pages.
locations = {}
conds = {}
def FindCountryBreadCrumbs(page):
  lines = page.splitlines()
  count = 0
  start = -1
  opened = False
  for line in lines:
    if line.find(\"
    \") != -1: start = count opened = True if opened and line.find(\"
\") != -1: end = count opened = False count = count + 1 return \"\\n\".join(lines[start: (end + 1)]) def GetText(nodelist): rc = [] for node in nodelist: if node.nodeType == node.TEXT_NODE: rc.append(HTMLParser.HTMLParser().unescape(node.data)) return \'\'.join(rc) def FindCondition(page): pat = \"(.*?)\" cds = re.findall(pat, page) cds = [HTMLParser.HTMLParser().unescape(cd).encode(\"utf-8\") for cd in cds] return cds def ExtractInfo(url): try: page = urllib2.urlopen(url).read() except Exception, e: return [] text = FindCountryBreadCrumbs(page) text = HTMLParser.HTMLParser().unescape(text) dom = minidom.parseString(text.encode(\"utf-8\")) locs = [] lis = dom.getElementsByTagName(\"li\") for li in lis: adr_list = li.getElementsByTagName(\"a\") if adr_list: locs.append(GetText(adr_list[0].childNodes).encode(\"utf-8\")) strs = li.getElementsByTagName(\"strong\") if strs: locs.append(GetText(strs[0].childNodes).encode(\"utf-8\")) cds = FindCondition(page) return locs, cds def AddMap(lst, m): for x in lst: if m.get(x) == None: m[x] = 1 def working(): while True: urls = q.get() #print len(urls) m = {} m2 = {} count = 0 for url in urls: count = count + 1 #print \"%d/%d\" % (count, len(urls)) locs, cds = ExtractInfo(url) AddMap(locs, m) AddMap(cds, m2) locks[1].acquire() AddMap(m.keys(), locations) AddMap(m2.keys(), conds) locks[1].release() q.task_done() def main(): if len(sys.argv) < 2: exit() loc_path = sys.argv[1] fp = open(loc_path, \"r\") urls = [line.strip() for line in fp] fp.close() #urls = urls[0:1000] blocks = len(urls) / ThreadNumber + 1 for start in range(0, len(urls), blocks): end = start + blocks if end > len(urls): end = len(urls) q.put(urls[start:end]) for i in range(ThreadNumber): t = Thread(target=working) t.setDaemon(True) t.start() q.join() fp = open(\"location_name.fr\", \"w\") fp.write(\"\\n\".join(locations.keys())) fp.close() fp = open(\"conditions.fr\", \"w\") fp.write(\"\\n\".join(conds.keys())) fp.close() if __name__ == \'__main__\': main()

希望本文所述对大家的python程序设计有所帮助。

相关内容

热门资讯

Mobi、epub格式电子书如... 在wps里全局设置里有一个文件关联,打开,勾选电子书文件选项就可以了。
定时清理删除C:\Progra... C:\Program Files (x86)下面很多scoped_dir开头的文件夹 写个批处理 定...
500 行 Python 代码... 语法分析器描述了一个句子的语法结构,用来帮助其他的应用进行推理。自然语言引入了很多意外的歧义,以我们...
scoped_dir32_70... 一台虚拟机C盘总是莫名奇妙的空间用完,导致很多软件没法再运行。经过仔细检查发现是C:\Program...
65536是2的几次方 计算2... 65536是2的16次方:65536=2⁶ 65536是256的2次方:65536=256 6553...
小程序支付时提示:appid和... [Q]小程序支付时提示:appid和mch_id不匹配 [A]小程序和微信支付没有进行关联,访问“小...
pycparser 是一个用... `pycparser` 是一个用 Python 编写的 C 语言解析器。它可以用来解析 C 代码并构...
微信小程序使用slider实现... 众所周知哈,微信小程序里面的音频播放是没有进度条的,但最近有个项目呢,客户要求音频要有进度条控制,所...
Apache Doris 2.... 亲爱的社区小伙伴们,我们很高兴地向大家宣布,Apache Doris 2.0.0 版本已于...
python清除字符串里非数字... 本文实例讲述了python清除字符串里非数字字符的方法。分享给大家供大家参考。具体如下: impor...