# -*- encoding: utf-8 -*-
# log_parse.py
import re
from collections import Counter
from datetime import datetime
def get_logs(line):
    """Return True if the line matches the expected access-log format."""
    pattern = r'\[\d\d/\w{3}/\d{4}\s\d\d:\d\d:\d\d\]\s"\w+\s\S+\s\S+"\s\d+\s\d+\s'
    return bool(re.fullmatch(pattern, line))
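# A hypothetical line accepted by get_logs (the real log isn't shown here, so
# this sample is an assumption):
# [21/Mar/2018 12:01:58] "GET https://example.com/static/main.css HTTP/1.1" 200 173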
def www(ignore_www, URL):
    """Strip a leading 'www.' from the URL when ignore_www is set."""
    if ignore_www and URL.startswith('www.'):
        return URL[4:]
    return URL
def start(start_at, line):
    """Return True if the line's timestamp is earlier than start_at."""
    if start_at:
        date = datetime.strptime(line[1:line.index(']')], '%d/%b/%Y %X')
        start_at = datetime.strptime(start_at, '%d/%b/%Y %X')
        if date < start_at:
            return True
    return False
def stop(stop_at, line):
    """Return True if the line's timestamp is later than stop_at."""
    if stop_at:
        date = datetime.strptime(line[1:line.index(']')], '%d/%b/%Y %X')
        stop_at = datetime.strptime(stop_at, '%d/%b/%Y %X')
        if date > stop_at:
            return True
    return False
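# Both filters take start_at/stop_at as strings in the same format as the log
# timestamps, e.g. '21/Mar/2018 12:01:58' ('%X' parses HH:MM:SS); the sample
# value is an assumption.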
def ignore(ignore_files, URL):
    """Return True if the URL ends in a 3-4 letter file extension."""
    if ignore_files:
        beg = URL.rfind('.')
        if beg == -1:  # no dot at all: not a file, never filter
            return False
        postfix = URL[beg + 1:]
        if postfix.isalpha() and 3 <= len(postfix) <= 4:
            return True
    return False
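# e.g. 'example.com/static/main.css' -> extension 'css' -> filtered out
# (hypothetical URL).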
def request(request_type, line):
    """Return True if the line should be skipped: a method filter is set
    and the request method does not match it."""
    if request_type:
        start = line.find('"')
        end = line.find(' ', start + 1)
        method = line[start + 1:end]
        return method != request_type
    return False
def find_url(log):
    """Extract the URL from a log line, dropping the scheme and query string."""
    start = log.find('http')
    end = log.find(' ', start + 1)
    url = log[start:end]
    url = url.replace('https://', '').replace('http://', '')
    pos = url.find('?')
    if pos != -1:  # trim only when a query string is present; slicing with
        url = url[:pos]  # find()'s -1 would drop the URL's last character
    return url
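# e.g. a line containing "GET http://example.com/page?id=1 HTTP/1.1" yields
# 'example.com/page' (hypothetical input).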
def urls_ignore(ignore_urls, URL):
    """Return True if the URL is in the ignore list."""
    return URL in ignore_urls
def slowest(d, line, URL):
    """Accumulate per-URL hit count and total response time."""
    time = int(line.split()[-1])  # last field of the line is the response time
    if URL in d:
        d[URL][0] += 1
        d[URL][1] += time
    else:
        d[URL] = [1, time]
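# After three hits of 100, 150 and 200 on the same URL the entry would read
# d['example.com/page'] == [3, 450] (hypothetical values).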
def parse(
        ignore_files=False,
        ignore_urls=(),
        start_at=None,
        stop_at=None,
        request_type=None,
        ignore_www=False,
        slow_queries=False,
        log_path='log.log.txt',
):
    """Return the top-5 hit counts, or the top-5 average response times
    when slow_queries is set."""
    log_dict = {}
    counter = Counter()
    with open(log_path) as f:
        for line in f:
            if not get_logs(line):
                continue
            if request(request_type, line):
                continue
            if start(start_at, line):
                continue
            if stop(stop_at, line):
                break
            URL = find_url(line)
            if ignore(ignore_files, URL):
                continue
            if urls_ignore(ignore_urls, URL):
                continue
            URL = www(ignore_www, URL)
            if slow_queries:
                slowest(log_dict, line, URL)
            else:
                counter[URL] += 1
    if slow_queries:
        # Average time per URL: total time // hit count, largest first.
        averages = sorted(
            (total // count for count, total in log_dict.values()),
            reverse=True,
        )
        return averages[:5]
    return [hits for _, hits in counter.most_common(5)]
def main():
    print(parse())


if __name__ == '__main__':
    main()
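# A minimal usage sketch (the parameter values are illustrative assumptions):
#
#   parse(request_type='GET', ignore_www=True)  # top-5 GET hit counts
#   parse(slow_queries=True)                    # top-5 average response times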