-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathcrawwwly.py
377 lines (259 loc) · 14.9 KB
/
crawwwly.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
#!/usr/bin/python3
import csv
import os
from os import listdir
import fnmatch
import sys
import datetime
from datetime import datetime
import PIL
from PIL import Image
from PIL import ImageChops
import webbrowser
import codecs
import pandas
import pandas as pd
import requests
from requests.exceptions import ConnectionError
from pathlib import Path
import matplotlib.pyplot as plt
################################################
# SET TIME FOR BENCHMARKING
################################################
# Record when the run began so the final benchmark can report elapsed time.
benchmarkStart = datetime.now()
###################################################
# DEFINE TIMESTAMP
###################################################
# Timestamps used throughout the run: a compact form for filenames
# (MMDDYYYY-HHMM) and a human-readable form for the HTML report.
now = datetime.now()
datestamp = now.strftime("%m%d%Y-%H%M")
datestamp_Readable = now.strftime("%m/%d/%Y - %H:%M")
###################################################
# VALIDATING THE URLS
###################################################
# Probe every domain in domains.csv; rows that cannot be reached (or return
# an HTTP error status) get validation set to "false" so the scan loop
# below skips them.
print('\x1b[6;30;42m' + "Now validating urls..." + '\x1b[0m' + '\n')
df = pandas.read_csv('domains.csv')
with open('domains.csv') as domainCSV:
    readCSV = csv.DictReader(domainCSV, delimiter=',')
    for row in readCSV:
        domainname = row['domains']      # full URL to probe
        simplename = row['simplename']   # short name used for folders/files
        validation = row['validation']   # current validation flag
        try:
            # Timeout keeps one dead host from hanging the entire run.
            checkurl = requests.get(domainname, timeout=30)
            if checkurl.status_code < 400:
                print ('\u001b[37m' + " -- " + domainname + " has been validated." + '\x1b[0m' + '\n') # Adding some colors as well
            else:
                # 4xx/5xx was previously ignored silently, leaving the row
                # marked valid even though the page can't be scanned.
                print ('\u001b[31m' + " -- " + domainname + " is not valid and will be skipped." + '\x1b[0m' + '\n') # Adding some colors as well
                df.loc[df.domains == domainname, 'validation'] = "false"
        except requests.exceptions.RequestException:
            # RequestException covers ConnectionError plus Timeout, DNS
            # failures, etc., so an odd failure can't crash the whole run.
            print ('\u001b[31m' + " -- " + domainname + " is not valid and will be skipped." + '\x1b[0m' + '\n') # Adding some colors as well
            df.loc[df.domains == domainname, 'validation'] = "false"
# Persist the updated validation flags for the scan loop and future runs.
df.to_csv (r'domains.csv', index= False, header = True)
###################################################
# READING THE CSV DOMAINS LIST
###################################################
def compare_images(path_one, path_two, output_comparisons):
    """Save an inverted pixel-difference image of two captures.

    The raw difference is mostly black, so it is inverted before saving
    to make changed regions stand out on a white background.
    Hoisted out of the loop: the original re-defined this function on
    every iteration.
    """
    image1 = Image.open(path_one, mode='r')
    image2 = Image.open(path_two, mode='r')
    diff = ImageChops.difference(image1, image2)  # Run the difference comparison
    invert = ImageChops.invert(diff)  # Invert the results b/c otherwise it is mostly black
    invert.save(output_comparisons)

# Main scan loop: for each validated domain, capture a screenshot, diff it
# against the previous capture, log the difference %, plot the history, and
# emit an HTML snippet for the final report.
with open('domains.csv') as domainCSV:
    readCSV = csv.DictReader(domainCSV, delimiter=',')
    for row in readCSV:
        domainname = row['domains']      # URL to capture
        simplename = row['simplename']   # short name used for folders/files
        validation = row['validation']   # "false" means skip this row
        outputfilename = simplename + '-' + datestamp + '.png'
        outputfilenamejpg = simplename + '-' + datestamp + '.jpg'
        if validation == "false":  # Do not work with urls that have not been validated.
            continue
        print ('\x1b[6;30;42m' + 'Now scanning: ' + domainname + '\x1b[0m' + '\n') # Adding some colors as well
        ###################################################
        # CHECK OR MAKE IMAGE DIRECTORIES
        ###################################################
        domainfolder = 'results/' + simplename
        # If folder doesn't exist, then create it.
        if not os.path.isdir(domainfolder):
            os.makedirs(domainfolder)
            print('\x1b[3;37;40m' + ' -- New folder created for this domain' + '\x1b[0m')
        ###################################################
        # RUNNING SELENIUM TO CAPTURE SCREENSHOTS
        ###################################################
        # NOTE(security): domainname comes straight from the CSV and is
        # interpolated into a shell command; switch to
        # subprocess.run([...], shell=False) if the CSV is ever untrusted.
        os.system ('python3 crawwwly-selenium.py --url ' + domainname + ' --output results/' + simplename + '/' + outputfilename)
        print ('\x1b[3;37;40m' + ' -- File saved as ' + outputfilename + '\x1b[0m') # Log that file was saved
        ###################################################
        # CONVERT FROM PNG TO JPG
        # (because alpha layers mess with differences mapping)
        ###################################################
        try:
            png = Image.open('results/' + simplename + '/' + outputfilename)
            png.load()  # required for png.split()
            background = Image.new("RGB", png.size, (255, 255, 255))
            background.paste(png, mask=png.split()[3])  # 3 is the alpha channel
            background.save('results/' + simplename + '/' + outputfilenamejpg, 'JPEG', quality=80)
            os.remove('results/' + simplename + '/' + outputfilename)  # deletes .png file from taking up space
        except FileNotFoundError:
            print("Looks like the file wasn't created because the site didn't get scraped properly.")
        ###################################################
        # CROP AND SAVE FOR HOMEPAGE GRID
        ###################################################
        try:
            toCrop = Image.open(r'results/' + simplename + '/' + outputfilenamejpg)
            width, height = toCrop.size
            # Keep the full width but only the top 1440px for the thumbnail
            # (crop() does not change the original image).
            croppedImage = toCrop.crop((0, 0, width, 1440))
            croppedPath = 'results/' + simplename + '/' + 'homepage-thumb-' + simplename + '.jpg'
            croppedImage.save(croppedPath, 'JPEG', quality=80)
        except FileNotFoundError:  # Error handling if original image wasn't created
            print("Looks like the file wasn't created because the site didn't get scraped properly.")
        ###################################################
        # GETTING THE TWO MOST RECENT FILES FOR COMPARISON
        ###################################################
        print('\x1b[3;37;40m' + ' -- Queueing up images for comparison...' + '\x1b[0m')
        path = 'results/' + simplename
        files = sorted(fnmatch.filter(os.listdir(path), simplename + "*.jpg"))  # sorted-by-name .jpg list
        if not files:
            # Bug fix: the original caught IndexError here, printed
            # "Page not monitored." and then FELL THROUGH to code that used
            # an undefined file_First (NameError). Skip the row instead.
            print("Page not monitored.")
            continue
        file_First = files[-1]  # the most recent capture
        # First-ever scan: compare the capture against itself for a zero-diff.
        file_Second = files[-2] if len(files) > 1 else files[-1]
        ###################################################
        # CREATE DIFF IMAGE
        ###################################################
        path_one = path + '/' + file_First
        path_two = path + '/' + file_Second
        output_comparisons = path + '/' + "differences-" + datestamp + ".jpg"
        print('\x1b[3;37;40m' + ' -- Comparing ' + file_First + ' and ' + file_Second + '\x1b[0m')
        compare_images(path_one, path_two, output_comparisons)
        print('\x1b[3;37;40m' + ' -- Comparison finished, output saved as ' + output_comparisons + '\x1b[0m')
        ###################################################
        # CALCULATE DIFFERENCE PERCENTAGE AS NON-WHITE AREA PERCENTAGE
        ###################################################
        im = Image.open(output_comparisons)
        pixels = list(im.getdata())
        # JPEG output is RGB, so pure white is exactly (255, 255, 255).
        white = sum(1 for pixel in pixels if pixel == (255, 255, 255))
        other = len(pixels) - white
        calcDiff = other / (white + other) * 100
        diffPercentageRounded = str(round(calcDiff, 2))  # rounded to two decimals
        print(" -- Differences are calculated at " + diffPercentageRounded + "%")
        ###################################################
        # WRITE DIFFERENCES TO CSV
        ###################################################
        datestamp2 = now.strftime("%m/%d/%Y")
        DiffLog = Path(path + '/' + "diff-history.csv")
        logIsNew = not DiffLog.is_file()
        # "a" both creates and appends, so one open covers new and existing
        # logs; the context manager guarantees the handle is closed.
        with open(DiffLog, "a") as writeDiffLog:
            if logIsNew:
                writeDiffLog.write("timestamp,difference\n")  # header b/c the file did not exist before
            writeDiffLog.write(datestamp2 + "," + diffPercentageRounded + "\n")
        ###################################################
        # CREATE BAR CHART
        ###################################################
        dataframeDiff = pd.read_csv(path + '/' + "diff-history.csv")
        plt.bar(x=dataframeDiff['timestamp'], height=dataframeDiff['difference'])
        plt.xticks(rotation=45)  # Rotates the angle of the x-axis labels
        plt.tight_layout()  # Ensures the labels don't fall off the page
        plt.savefig(path + '/' + "diffplot.png")
        plt.clf()  # Clean plot cache so that it doesn't keep adding every loop.
        plotPath = path + '/' + "diffplot.png"  # referenced in the snippet below
        ###################################################
        # BUILD HTML REPORT SNIPPETS
        ###################################################
        print('\x1b[3;37;40m' + ' -- Building report snippets' + '\x1b[0m')
        # One snippet file per domain, written under results/ so the report
        # assembler can pick it up. (Fixed broken CSS: "margin-top25px" was
        # missing its colon.)
        with open(path + "-snippets.html", "w+") as writeHTMLSnippets:
            writeHTMLSnippets.write(
                """ <div class=\"clearfix snippet-container\">
 <div class=\"clearfix\" style=\"margin-top:25px;\"><h2>""" + simplename + """, """ + domainname + """</h2><br><p>""" + datestamp_Readable + """</p></div>\r\n
 <div><img src=\"""" + plotPath + """\"></div>\r\n
 <div class=\"clearfix\"><div class=\"image-container\">\r\n
 <h3>Current Site</h3>\r\n
 <a href=\"""" + path_one + """\"><img class=\"imagediff\" src=\"""" + path_one + """\"></a>\r\n
 </div> \r\n
 <div class=\"image-container\">\r\n
 <h3>Comparative Differences (""" + diffPercentageRounded + """%)</h3>\r\n
 <a href=\"""" + output_comparisons + """\"><img class=\"imagediff\" src=\"""" + output_comparisons + """\"></a>\r\n
 </div>\r\n
 <div class=\"image-container\">\r\n
 <h3>Previous Capture</h3>\r\n
 <a href=\"""" + path_two + """\"><img class=\"imagediff\" src=\"""" + path_two + """\"></a>\r\n
 </div>\r\n
 </div>
 </div>\r\n
"""
            )
        print('\x1b[3;37;40m' + ' -- Finished building report snippets' + '\x1b[0m' + '\n')
###################################################
# COMBINE SNIPPETS INTO HTML REPORT
###################################################
# Concatenate top.html + every per-domain snippet in results/ into
# Report.html, then open it in the default browser. All file handles are
# now closed via context managers (the original leaked every one of them).
print ('\x1b[6;30;42m' + 'Assembling full report...' + '\x1b[0m' + '\n') # Adding some colors as well
with open("Report.html", "w+") as writeHTML_Report:
    # Assemble the top section.
    with codecs.open('top.html', 'rb', encoding='utf-8') as readTop:
        writeHTML_Report.write(readTop.read())
    writeHTML_Report.write("\n")
    # Append each snippet, sorted so the report order is deterministic.
    # ("snippet" instead of "file": avoid shadowing the builtin.)
    for snippet in sorted(os.listdir("results")):
        if snippet.endswith(".html"):  # Reading only .html files
            with codecs.open('results/' + snippet, 'rb', encoding='utf-8') as readFile:
                writeHTML_Report.write(readFile.read())
            writeHTML_Report.write("\n")
    # Assemble the bottom section.
    writeHTML_Report.write("""</body></html>""")
print ('\x1b[6;30;42m' + 'Report completed' + '\x1b[0m' + '\n') # Adding some colors as well
# Open the file to view the report.
webbrowser.open('file://' + os.path.realpath("Report.html"))
################################################
# FINALIZE BENCHMARK
################################################
# Report how long the whole crawl + report build took, measured from the
# benchmarkStart captured at the top of the script.
benchmarkFinish = datetime.now()
benchmarkDiff = benchmarkFinish - benchmarkStart
benchmarkDiffstr = str(benchmarkDiff)
print('Benchmarking the time to finish executing: ' + benchmarkDiffstr)