import sys # used for taking input from the command line
from bs4 import BeautifulSoup # used for parsing and navigating the HTML
import urllib # used for grabbing data from URLs
import unicodedata # for converting unicode to ascii
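# NOTE: this script targets Python 2 (print statements, urllib.urlopen and the
# unicode type). BeautifulSoup 4 is assumed to be installed, e.g. via
# `pip install beautifulsoup4`.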
"""
Jon Hurlock's Google Scholar Tools by Jon Hurlock (@jonhurlock)
* is licensed under a Creative Commons Attribution-ShareAlike 3.0 Unported License.
* Permissions beyond the scope of this license may be available at http://cs.swan.ac.uk/~csjonhurlock/
--------
The following script scrapes the important data from a Google Scholar author page.
You must provide the URL for an individual scholar's page, enclosed in quotation marks.
To run
--------
python scrape_author.py "some url"
python scrape_author.py help
Output
--------
Publications for Jonathan Hurlock:
Searching Twitter: Separating the Tweet from the Chaff. ==> http://scholar.google.co.uk/citations?view_op=view_citation&hl=en&oe=ASCII&user=pu0mIWgAAAAJ&citation_for_view=pu0mIWgAAAAJ:u5HHmVD_uO8C
Keyword clouds: having very little effect on sensemaking in web search engines ==> http://scholar.google.co.uk/citations?view_op=view_citation&hl=en&oe=ASCII&user=pu0mIWgAAAAJ&citation_for_view=pu0mIWgAAAAJ:u-x6o8ySG0sC
"""
if len(sys.argv) != 2:
    print "Error: You have not given the script a URL"
    print "Try again, and try running something such as:"
    print "$ python scrape_author.py \"http://scholar.google.co.uk/citations?user=pu0mIWgAAAAJ&hl=en\""
    sys.exit()
else:
    url_to_scrape = sys.argv[1]
    if sys.argv[1].strip() == "help":
        print "You must run the following command:"
        print "$ python scrape_author.py \"someurl\""
        print "someurl - has to be surrounded by quotation marks"
        print "it must also be the page for a specific author."
        print "contact @jonhurlock on twitter for more information."
        sys.exit()
# go get content from a URL
#url_to_scrape = "http://scholar.google.co.uk/citations?user=pu0mIWgAAAAJ&hl=en"
f = urllib.urlopen(url_to_scrape)
html_doc = f.read()
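# A minimal sketch of the same fetch with error handling (an addition, not
# part of the original flow), using the Python 2 urllib2 module:
#   import urllib2
#   try:
#       html_doc = urllib2.urlopen(url_to_scrape, timeout=10).read()
#   except urllib2.URLError as e:
#       sys.exit("Could not fetch %s: %s" % (url_to_scrape, e))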
# convert output to something Beautiful Soup can use
soup = BeautifulSoup(html_doc, 'html.parser') # name the parser explicitly so bs4 does not have to guess
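# 'html.parser' is the stdlib parser; 'lxml' or 'html5lib' can be swapped in
# above if installed - an assumption about the environment, not a requirement
# of the original script.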
# Get the Author's Name
author_name = ""
divs = soup.find_all('div') # get all the divs
for div in divs:
    if div.has_attr('id'):
        if div['id']=='gsc_prf_in': # the div holding the author's name
            author_name = div.get_text(strip=True)
            if type(author_name)==unicode:
                author_name = (unicodedata.normalize('NFKD', author_name).encode('ascii','ignore')).strip()
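# The same lookup can be written more directly with bs4 (an equivalent
# sketch, not the original approach):
#   name_div = soup.find('div', id='gsc_prf_in')
#   if name_div is not None:
#       author_name = name_div.get_text(strip=True)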
# Get the Publications and links to them
publications = []
# some setup stuff
tables = soup.find_all('table') # get all the tables
publication_table = None
# traverse the DOM tree by tables
for table in tables:
    if table.has_attr('id'):
        if table['id']=='gsc_a_t': # the table listing the publications
            publication_table = table.find_all('td')
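# Again, bs4 can do this lookup directly (an equivalent sketch):
#   title_table = soup.find('table', id='gsc_a_t')
#   publication_table = title_table.find_all('td') if title_table is not None else None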
if publication_table is not None: # guard against the publications table not being found
    for data in publication_table:
        if u'gsc_a_t' in data.get('class', []): # only the cells holding a paper's title
            # paper's title
            paper_title = data.a.get_text(strip=True)
            if type(paper_title)==unicode:
                paper_title = (unicodedata.normalize('NFKD', paper_title).encode('ascii','ignore')).strip()
            # link to the paper
            paper_link = data.a['href']
            if type(paper_link)==unicode:
                paper_link = (unicodedata.normalize('NFKD', paper_link).encode('ascii','ignore')).strip()
            paper_link = 'http://scholar.google.co.uk'+paper_link
            publications.append([paper_title,paper_link])
# Printing out the Info:
print 'Publications for '+author_name+':'
for publication in publications:
print "\n"+publication[0]+" ==> "+publication[1]