pan1
import urllib.request
import re
import random

def get_source(key):
    print('Please wait, crawling....')
    headers = [
        {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3 WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.4.8.1000 Chrome/30.0.1599.101 Safari/537.36'},
        {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:45.0) Gecko/20100101 Firefox/45.0'},
        {'User-Agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 10.0; WOW64; Trident/7.0)'},
    ]
    header = random.choice(headers)  # pick a header at random
    keyword = key.encode('utf-8')
    keyword = urllib.request.quote(keyword)  # URL-encode the keyword
    # The key step: run the search once, then parse the result pages it links to
    url = 'http://www.wangpansou.cn/s.php?wp=0&ty=gn&op=gn&q=' + keyword + '&q=' + keyword
    req = urllib.request.Request(url, headers=header)
    html = urllib.request.urlopen(req)
    head_type = html.headers['Content-Type'].split('=')[-1]  # the page's character encoding
    status = html.getcode()  # status code; continue only if the request succeeded
    if status == 200:
        html = html.read()
        html = html.decode(head_type)  # decode with the site's own encoding
        # regex-match the pager links
        pattern = re.compile(r'<a href="(.+)"><div class="cse-search-result_paging_num " tabindex="\d{1,3}">\d{1,3}</div></a>')
        content = pattern.findall(html)
        url_list = []
        url_head = 'http://www.wangpansou.cn/'
        for i in content:
            i = url_head + i  # the regex captures only a relative path, so prepend the host to form a complete link
            if i not in url_list:  # the pager really does appear twice on the page, so deduplicate
                url_list.append(i)
        # url_list now holds every result page the pager links to
        # (note: this misses the current page, and the counter below starts at 1;
        # these are the two small bugs that pan2 fixes)
        count = 1  # running counter
        for each_url in url_list:
            header = random.choice(headers)  # a fresh random header per request, so the server is less likely to block us
            request1 = urllib.request.Request(each_url, headers=header)
            html2 = urllib.request.urlopen(request1)
            status = html2.getcode()
            if status == 200:
                html2 = html2.read()
                html2 = html2.decode(head_type)
                pattern1 = re.compile(r'<a class=".+" href="(.+)" rel.+')  # share links
                content1 = pattern1.findall(html2)
                pattern2 = re.compile(r'<div id=".+" class="cse-search-result_content_item_mid">\s+(.+)')  # file info
                content2 = pattern2.findall(html2)
                for i in range(0, len(content2)):
                    print(str(count) + ':' + content2[i] + '\n' + content1[i])
                    print()
                    count += 1
    print('Found %d resources in total; crawling finished!' % count)

if __name__ == '__main__':
    get_source(input('Enter the resource name to search for: '))

Notes:
a. This is really a secondary search that goes through the 网盘搜 site (wangpansou.cn); if you are hunting for a resource you can also just go to wangpansou and search it page by page.
The script's one advantage is that it pulls down every result in one pass, so you don't have to page through them by hand (a stripped-down sketch of this one-pass idea follows these notes).
b. The code is fairly ugly, but it documents the learning process: get it working first, clean it up later.
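The one-pass idea from note a can be isolated from the rest. Below is a minimal sketch of it; it assumes, without verification, that wangpansou's `wp` query parameter is a zero-based page index (the scripts' own URLs hint at this but never confirm it), and `fetch` and `fetch_all_pages` are hypothetical helper names, not part of the original code.

import random
import urllib.request

USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:45.0) Gecko/20100101 Firefox/45.0',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 10.0; WOW64; Trident/7.0)',
]

def fetch(url):
    # One request with a randomly chosen User-Agent, as both scripts do.
    req = urllib.request.Request(url, headers={'User-Agent': random.choice(USER_AGENTS)})
    with urllib.request.urlopen(req, timeout=10) as resp:
        charset = resp.headers.get_content_charset() or 'utf-8'
        return resp.read().decode(charset)

def fetch_all_pages(keyword, n_pages):
    # Yield the HTML of result pages 0 .. n_pages-1 for one keyword,
    # assuming wp= selects the page (unverified).
    quoted = urllib.request.quote(keyword.encode('utf-8'))
    for page in range(n_pages):
        url = 'http://www.wangpansou.cn/s.php?wp=%d&ty=gn&op=gn&q=%s' % (page, quoted)
        yield fetch(url)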
"""
Slightly reworked below as pan2: the request boilerplate is factored into a get_html() helper, and two small bugs are fixed (the first result page is no longer skipped, and the final total no longer over-counts by one).
pan2

# coding: utf-8
import urllib.request
import re
import random

def get_html(url, header):
    req = urllib.request.Request(url, headers=header)
    html = urllib.request.urlopen(req)
    head_type = html.headers['Content-Type'].split('=')[-1]
    status = html.getcode()
    return html, head_type, status  # the response, its encoding, and its status code

headers = [
    {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3 WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/4.4.8.1000 Chrome/30.0.1599.101 Safari/537.36'},
    {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:45.0) Gecko/20100101 Firefox/45.0'},
    {'User-Agent': 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 10.0; WOW64; Trident/7.0)'},
]

keyword = input('Enter the resource name to search for: ')
keyword = urllib.request.quote(keyword.encode('utf-8'))
url = 'http://www.wangpansou.cn/s.php?wp=0&ty=gn&op=gn&q=' + keyword + '&q=' + keyword
header = random.choice(headers)

f_html, f_head_type, f_status = get_html(url, header)
if f_status == 200:
    f_html = f_html.read()
    f_html = f_html.decode(f_head_type)
    pattern = re.compile(r'<a href="(.+)"><div class="cse-search-result_paging_num " tabindex="\d{1,3}">\d{1,3}</div></a>')
    content = pattern.findall(f_html)  # pager links to every page with results
    url_list = []
    url_head = 'http://www.wangpansou.cn/'
    for i in content:
        i = url_head + i  # the regex captures only a relative path, so prepend the host to form a complete link
        if i not in url_list:  # the pager appears twice on the page, so deduplicate
            url_list.append(i)
    first_url = url_list[0][:-2] + '0'  # bug fix: the pager omits the current page, so add page one back
    url_list.insert(0, first_url)

count = 0  # bug fix: start at 0 and increment before printing, so the final total is exact
for each_url in url_list:
    header = random.choice(headers)  # a fresh random header per request
    s_html, s_head_type, s_status = get_html(each_url, header)
    if s_status == 200:
        s_html = s_html.read()
        s_html = s_html.decode(s_head_type)
        s_pattern = re.compile(r'<a class=".+" href="(.+)" rel.+')
        s_content = s_pattern.findall(s_html)  # share links
        t_pattern = re.compile(r'<div id=".+" class="cse-search-result_content_item_mid">\s+(.+)')
        t_content = t_pattern.findall(s_html)  # file info
    else:
        print('Website Error!')
        # caution: on a non-200 response the loop below still runs with s_content
        # undefined or left over from the previous page; see the hardened sketch below
    for i in range(0, len(s_content)):
        count += 1
        print(str(count) + ':' + t_content[i] + '\n' + s_content[i])
        print()

print('Found %d resources in total; crawling finished!' % count)
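As the caution in the else branch notes, pan2 can still fall over when the site returns a non-200 page, and it would misprint results if the two regexes ever found a different number of matches. Below is a hedged rework of just the main loop, not the author's code: it assumes headers, url_list, and get_html() from pan2 are already in scope, and the pattern names link_pat and info_pat are mine.

import re
import random
import urllib.error

# The two patterns from pan2, compiled once up front instead of per page.
link_pat = re.compile(r'<a class=".+" href="(.+)" rel.+')   # share links
info_pat = re.compile(r'<div id=".+" class="cse-search-result_content_item_mid">\s+(.+)')  # file info

count = 0
for each_url in url_list:
    header = random.choice(headers)
    try:
        s_html, s_head_type, s_status = get_html(each_url, header)
    except urllib.error.URLError as err:
        print('Website Error: %s' % err)
        continue  # skip this page instead of falling through with stale results
    if s_status != 200:
        continue
    page = s_html.read().decode(s_head_type)
    # zip() stops at the shorter list, so a length mismatch can't raise IndexError.
    for info, link in zip(info_pat.findall(page), link_pat.findall(page)):
        count += 1
        print('%d:%s\n%s\n' % (count, info, link))
print('Found %d resources in total; crawling finished!' % count)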