WebScrapper.py
#import grequests
import requests
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import threading
from queue import Queue
import time

SEARCH_QUERY = "healthy living"

# Load the previously collected video URLs; the URL is expected in the second column.
data = pd.read_csv(r'D:\Scrapping programs\healthy+living_urls.csv')
data = np.array(data)
urls = data[:, 1]

dataset = []
lock = threading.Lock()  # guards the shared dataset list across worker threads
q = Queue()
def myJob(url):
    """Scrape metadata for a single YouTube watch page and append it to the shared dataset."""
    datapoint_dict = {}
    single_soup = BeautifulSoup(requests.get(url, timeout=5).content, 'html.parser')
    #print("Object created")
    # NO_OF_COMMENTS = single_soup.find(id="watch-discussion")
    # print(NO_OF_COMMENTS)
    YOUTUBE_CATEGORY = single_soup.find(class_="content watch-info-tag-list").findChildren("a")[0].text
    channel_tag = single_soup.find(class_="yt-user-info").findChildren("a")[0]
    PUBLISH_DATE = single_soup.find(class_="watch-time-text").text
    DESCRIPTION = single_soup.find(id="eow-description").text
    VIDEO_TITLE = single_soup.find("span", class_="watch-title").text.strip()
    VIDEO_VIEWS = single_soup.find("div", class_="watch-view-count").text
    # Likes/dislikes can be hidden on some videos, so fall back to "0".
    try:
        LIKES = single_soup.find('button', {"title": "I like this"}).findChildren("span")[0].text
    except (AttributeError, IndexError):
        LIKES = "0"
    try:
        DISLIKES = single_soup.find('button', {"title": "I dislike this"}).findChildren("span")[0].text
    except (AttributeError, IndexError):
        DISLIKES = "0"
    CHANNEL_NAME = channel_tag.text.strip()
    datapoint_dict['channel_name'] = CHANNEL_NAME
    #datapoint_dict['total_subscribers'] = SUBCRIBER_COUNT
    datapoint_dict['video_url'] = url
    datapoint_dict['video_title'] = VIDEO_TITLE
    datapoint_dict['video_views'] = VIDEO_VIEWS
    datapoint_dict['likes'] = LIKES
    datapoint_dict['dislikes'] = DISLIKES
    datapoint_dict['description'] = DESCRIPTION
    datapoint_dict['published_date'] = PUBLISH_DATE
    datapoint_dict['youtube_category'] = YOUTUBE_CATEGORY
    # Append and print under the lock so threads don't corrupt the list or interleave output.
    with lock:
        dataset.append(datapoint_dict)
        print(f"Video URL :- {url}")
        print(f"Channel name: {CHANNEL_NAME}")
        #print(f"Subscriber count: {SUBCRIBER_COUNT}")
        print(f"Video Title: {VIDEO_TITLE}")
        print(f"No of views= {VIDEO_VIEWS}")
        print(f"Likes: {LIKES}")
        print(f"Dislikes: {DISLIKES}")
        print(f"Description: {DESCRIPTION}")
        print(f"Published on: {PUBLISH_DATE}")
        print(f"Youtube category: {YOUTUBE_CATEGORY}")
        print(len(dataset))
        print("--------------------------------------------------------")
def threader():
    """Worker loop: pull URLs off the queue until the main thread exits."""
    while True:
        url = q.get()
        try:
            myJob(url)
        except Exception as e:
            print(f"Failed to scrape {url}: {e}")  # don't let one bad URL hang q.join()
        finally:
            q.task_done()
# Start 10 daemon worker threads that consume URLs from the queue.
for x in range(10):
    t = threading.Thread(target=threader)
    t.daemon = True
    t.start()

# Enqueue every URL and block until all of them have been processed.
for url in urls:
    q.put(url)
q.join()

print(len(dataset))
data = pd.DataFrame(dataset)
data.to_csv(f'{SEARCH_QUERY}_dataset.csv')