From 95bda804c2d02ecbe824b5e7f9849baf395da03d Mon Sep 17 00:00:00 2001
From: copie
Date: Wed, 4 Jan 2017 19:15:54 +0800
Subject: [PATCH] Finish crawling the URLs of all records on gs.amac.org.cn
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../9.gs.amac.org.cn/README.md" |  2 +
 .../9.gs.amac.org.cn/shimu.py"  | 51 +++++++------------
 2 files changed, 21 insertions(+), 32 deletions(-)
 create mode 100644 "6.\347\210\254\350\231\253\351\241\271\347\233\256\346\272\220\347\240\201/9.gs.amac.org.cn/README.md"

diff --git "a/6.\347\210\254\350\231\253\351\241\271\347\233\256\346\272\220\347\240\201/9.gs.amac.org.cn/README.md" "b/6.\347\210\254\350\231\253\351\241\271\347\233\256\346\272\220\347\240\201/9.gs.amac.org.cn/README.md"
new file mode 100644
index 00000000..46d3702d
--- /dev/null
+++ "b/6.\347\210\254\350\231\253\351\241\271\347\233\256\346\272\220\347\240\201/9.gs.amac.org.cn/README.md"
@@ -0,0 +1,2 @@
+Crawling of the URLs for every record on gs.amac.org.cn is complete; the collected URLs are kept in urllist.
+The URLs are not saved to disk and the detail pages are not scraped: each detail page has too many fields, which is more work than I can take on.
diff --git "a/6.\347\210\254\350\231\253\351\241\271\347\233\256\346\272\220\347\240\201/9.gs.amac.org.cn/shimu.py" "b/6.\347\210\254\350\231\253\351\241\271\347\233\256\346\272\220\347\240\201/9.gs.amac.org.cn/shimu.py"
index 2cd52de2..671576ad 100644
--- "a/6.\347\210\254\350\231\253\351\241\271\347\233\256\346\272\220\347\240\201/9.gs.amac.org.cn/shimu.py"
+++ "b/6.\347\210\254\350\231\253\351\241\271\347\233\256\346\272\220\347\240\201/9.gs.amac.org.cn/shimu.py"
@@ -1,41 +1,28 @@
-#!/usr/bin/env python
-# -*- coding: UTF-8 -*-
-#-------------------------------------------------------------------------
-# Program: shimu.py
-# Version: 0.1
-# Author: copie
-# Date: written 2016/12/15
-# Language: Python 3.5.x
-# System: archlinux
-# Usage: python shimu.py
-# Purpose:
-#-------------------------------------------------------------------------
 from selenium import webdriver
 import time
-
-browser = webdriver.PhantomJS()
+import bs4
+urllist = set()
+browser = webdriver.Chrome()
 browser.get('http://gs.amac.org.cn/amac-infodisc/res/pof/manager/index.html')
-time.sleep(10)
+time.sleep(5)
 browser.get('http://gs.amac.org.cn/amac-infodisc/res/pof/manager/index.html')
-files = open('ziliao.txt','w')
-body=browser.find_element_by_xpath('//*[@id="managerList"]/tbody')
-nextButton=browser.find_element_by_xpath('//*[@id="managerList_paginate"]/a[3]')
-t=browser.find_element_by_xpath('//*[@id="managerList_length"]/label/select/option[4]')
+
+nextButton = browser.find_element_by_xpath(
+    '//*[@id="managerList_paginate"]/a[3]')
+t = browser.find_element_by_xpath(
+    '//*[@id="managerList_length"]/label/select/option[4]')
 t.click()
-i=1
-while 1:
-    str=body.text
-    strs=str.split('\n')
-    for s in strs:
-        files.writelines(s)
-        files.writelines('\n')
-    print(len(strs))
-    if len(strs) < 100:
+time.sleep(3)
+i = 1
+while True:
+    soup = bs4.BeautifulSoup(browser.page_source, 'lxml')
+    if len(soup.findAll('tbody')[4].findAll('tr')) < 100:  # BUG: this exits before the last page is scraped
         break
+    for tmp in soup.findAll('tbody')[4].findAll('tr'):
+        urllist.add(tmp.findAll('td')[1].find('a').get('href'))
     nextButton.click()
+    time.sleep(2)
     print(i)
-    i=i+1
+    print(len(urllist))
+    i = i + 1
     time.sleep(5)
-files.close()
-browser.close()
-
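
Note on the BUG comment above: the row count is checked before the rows are harvested, so the final (short) page never makes it into urllist. A minimal sketch of one possible reordering, collecting the rows first and only then deciding whether to stop (same identifiers as in shimu.py above):

    while True:
        soup = bs4.BeautifulSoup(browser.page_source, 'lxml')
        rows = soup.findAll('tbody')[4].findAll('tr')
        for row in rows:
            # column 1 of each row holds the link to the record's detail page
            urllist.add(row.findAll('td')[1].find('a').get('href'))
        if len(rows) < 100:  # a short page means this was the last one
            break
        nextButton.click()
        time.sleep(2)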
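
The README also notes that urllist is never written out. A minimal sketch of persisting it once the loop finishes (the file name urllist.txt is an assumption, not part of this patch):

    # write one URL per line; sorting keeps successive runs diffable
    with open('urllist.txt', 'w') as f:
        for url in sorted(urllist):
            f.write(url + '\n')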