main.py
import time
import cv2
import pickle
import numpy as np
import sys
from collections import deque
import platform
from board_calibration_machine_learning import detect_board
from game import Game
from board_basics import Board_basics
from helper import perspective_transform
from speech import Speech_thread
from videocapture import Video_capture_thread
from languages import *
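
# Default configuration; every value below can be overridden by a
# command-line argument parsed in the loop further down.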
webcam_width = None
webcam_height = None
fps = None
comment_me = False
comment_opponent = False
calibrate = False
cap_index = 0
cap_api = cv2.CAP_ANY
voice_index = 0
language = English()
token = ""
is_broadcast = False
pgn_path = ''
move_search_depth = 1
video_path = ""
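
# Parse command-line arguments: bare flags ("calibrate", "comment-me",
# "comment-opponent") and "key=value" options. Anything else, including the
# script name itself, falls through and is ignored.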
for argument in sys.argv:
    if argument == "comment-me":
        comment_me = True
    elif argument == "comment-opponent":
        comment_opponent = True
    elif argument.startswith("cap="):
        cap_index = int("".join(c for c in argument if c.isdigit()))
        platform_name = platform.system()
        if platform_name == "Darwin":
            cap_api = cv2.CAP_AVFOUNDATION
        elif platform_name == "Linux":
            cap_api = cv2.CAP_V4L2
        else:
            cap_api = cv2.CAP_DSHOW
    elif argument.startswith("voice="):
        voice_index = int("".join(c for c in argument if c.isdigit()))
    elif argument.startswith("lang="):
        if "German" in argument:
            language = German()
        elif "Russian" in argument:
            language = Russian()
        elif "Turkish" in argument:
            language = Turkish()
        elif "Italian" in argument:
            language = Italian()
        elif "French" in argument:
            language = French()
    elif argument.startswith("token="):
        token = argument[len("token="):].strip()
    elif argument.startswith("pgn="):
        is_broadcast = True
        pgn_path = argument[len("pgn="):]
        move_search_depth = 2
    elif argument == "calibrate":
        calibrate = True
    elif argument.startswith("vpath="):
        video_path = argument[len("vpath="):]
    elif argument.startswith("width="):
        webcam_width = int(argument[len("width="):])
    elif argument.startswith("height="):
        webcam_height = int(argument[len("height="):])
    elif argument.startswith("fps="):
        fps = int(argument[len("fps="):])
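
# Motion-detection tuning constants. A frame counts as "in motion" when the
# thresholded foreground mask mean exceeds MOTION_START_THRESHOLD; a candidate
# move mask whose mean reaches MAX_MOVE_MEAN is discarded as too large to be a
# single move (likely a hand or a lighting change). COUNTER_MAX_VALUE is the
# number of consecutive quiet/stable frames required, and HISTORY is the KNN
# background-subtractor history length.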
MOTION_START_THRESHOLD = 1.0
HISTORY = 100
MAX_MOVE_MEAN = 50
COUNTER_MAX_VALUE = 3
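
# Two KNN background subtractors: move_fgbg is used to isolate the squares
# that changed between the pre-move and post-move frames, while motion_fgbg
# detects when something (typically a hand) is moving over the board.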
move_fgbg = cv2.createBackgroundSubtractorKNN()
motion_fgbg = cv2.createBackgroundSubtractorKNN(history=HISTORY)
video_capture_thread = Video_capture_thread()
video_capture_thread.daemon = True
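
# Video source: transcribe a recorded video when vpath= was given, otherwise
# open the selected webcam and apply any requested resolution/FPS settings.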
if video_path:
    video_capture_thread.capture = cv2.VideoCapture(video_path)
    video_capture_thread.is_transcribe = True
else:
    video_capture_thread.capture = cv2.VideoCapture(cap_index, cap_api)
    if webcam_width is not None:
        video_capture_thread.capture.set(cv2.CAP_PROP_FRAME_WIDTH, webcam_width)
    if webcam_height is not None:
        video_capture_thread.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, webcam_height)
    if fps is not None:
        video_capture_thread.capture.set(cv2.CAP_PROP_FPS, fps)
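
# Board calibration: either detect the board in the live feed with the ONNX
# corner/piece/color models, or reuse the data stored in constants.bin
# (presumably written by the project's separate calibration step). Both paths
# yield the perspective-transform points (pts1), the side-view compensation,
# the rotation count and an optional ROI mask.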
if calibrate:
    corner_model = cv2.dnn.readNetFromONNX("yolo_corner.onnx")
    piece_model = cv2.dnn.readNetFromONNX("cnn_piece.onnx")
    color_model = cv2.dnn.readNetFromONNX("cnn_color.onnx")
    for _ in range(10):
        ret, frame = video_capture_thread.capture.read()
        if not ret:
            print("Error reading frame. Please check your webcam connection.")
            continue
    is_detected = False
    for _ in range(100):
        ret, frame = video_capture_thread.capture.read()
        if not ret:
            print("Error reading frame. Please check your webcam connection.")
            continue
        result = detect_board(frame, corner_model, piece_model, color_model)
        if result:
            pts1, side_view_compensation, rotation_count = result
            roi_mask = None
            is_detected = True
            break
    if not is_detected:
        print("Could not detect the chess board.")
        video_capture_thread.capture.release()
        sys.exit(0)
else:
    with open('constants.bin', 'rb') as infile:
        calibration_data = pickle.load(infile)
    if calibration_data[0]:
        pts1, side_view_compensation, rotation_count = calibration_data[1]
        roi_mask = None
    else:
        corners, side_view_compensation, rotation_count, roi_mask = calibration_data[1]
        pts1 = np.float32([list(corners[0][0]), list(corners[8][0]), list(corners[0][8]),
                           list(corners[8][8])])
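
# Start the capture and speech threads, then build the Game object that tracks
# the position and handles the commentary/broadcast output.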
video_capture_thread.start()
board_basics = Board_basics(side_view_compensation, rotation_count)
speech_thread = Speech_thread()
speech_thread.daemon = True
speech_thread.index = voice_index
speech_thread.is_broadcast = is_broadcast
speech_thread.start()
game = Game(board_basics, speech_thread, comment_me, comment_opponent,
            language, token, roi_mask, is_broadcast, pgn_path)
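
# Block until the board has been still for COUNTER_MAX_VALUE consecutive
# frames, i.e. until the motion mask mean stays below MOTION_START_THRESHOLD
# (typically meaning the player's hand has left the board).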
def waitUntilMotionCompletes():
    counter = 0
    while counter < COUNTER_MAX_VALUE:
        frame = video_capture_thread.get_frame()
        frame = perspective_transform(frame, pts1)
        fgmask = motion_fgbg.apply(frame)
        ret, fgmask = cv2.threshold(fgmask, 250, 255, cv2.THRESH_BINARY)
        mean = fgmask.mean()
        if mean < MOTION_START_THRESHOLD:
            counter += 1
        else:
            counter = 0
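
# Feed frames to both background subtractors until their foreground masks stop
# improving (the mask mean no longer decreases for COUNTER_MAX_VALUE
# consecutive frames) and return the last stable frame.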
def stabilize_background_subtractors():
    best_mean = float("inf")
    counter = 0
    while counter < COUNTER_MAX_VALUE:
        frame = video_capture_thread.get_frame()
        frame = perspective_transform(frame, pts1)
        move_fgbg.apply(frame)
        fgmask = motion_fgbg.apply(frame, learningRate=0.1)
        ret, fgmask = cv2.threshold(fgmask, 250, 255, cv2.THRESH_BINARY)
        mean = fgmask.mean()
        if mean >= best_mean:
            counter += 1
        else:
            best_mean = mean
            counter = 0

    best_mean = float("inf")
    counter = 0
    while counter < COUNTER_MAX_VALUE:
        frame = video_capture_thread.get_frame()
        frame = perspective_transform(frame, pts1)
        fgmask = move_fgbg.apply(frame, learningRate=0.1)
        ret, fgmask = cv2.threshold(fgmask, 250, 255, cv2.THRESH_BINARY)
        motion_fgbg.apply(frame)
        mean = fgmask.mean()
        if mean >= best_mean:
            counter += 1
        else:
            best_mean = mean
            counter = 0
    return frame
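
# Warm up the subtractors and keep a short queue of recent stable frames; the
# oldest entry later serves as the pre-move reference image.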
previous_frame = stabilize_background_subtractors()
previous_frame_queue = deque(maxlen=10)
previous_frame_queue.append(previous_frame)
speech_thread.put_text(language.game_started)
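
# With a commentator/broadcast attached, wait for the game to leave the 'wait'
# state before building the SSIM and HOG models from the current board image;
# for non-standard variants, previously saved models are loaded instead.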
if game.commentator:
    game.commentator.start()
    while game.commentator.game_state.variant == 'wait':
        time.sleep(0.1)
    if game.commentator.game_state.variant == 'standard':
        board_basics.initialize_ssim(previous_frame)
        game.initialize_hog(previous_frame)
    else:
        board_basics.load_ssim()
        game.load_hog()
else:
    board_basics.initialize_ssim(previous_frame)
    game.initialize_hog(previous_frame)
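
# Main loop: watch for motion on the board, wait for it to settle, diff the
# stabilized frame against the pre-move reference to obtain a move mask, and
# ask the Game object to recognize and register the move (rejecting frames
# that only show a lighting change).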
while not game.board.is_game_over() and not (game.commentator and game.commentator.game_state.resign_or_draw):
    sys.stdout.flush()
    frame = video_capture_thread.get_frame()
    frame = perspective_transform(frame, pts1)
    fgmask = motion_fgbg.apply(frame)
    ret, fgmask = cv2.threshold(fgmask, 250, 255, cv2.THRESH_BINARY)
    kernel = np.ones((11, 11), np.uint8)
    fgmask = cv2.erode(fgmask, kernel, iterations=1)
    mean = fgmask.mean()
    if mean > MOTION_START_THRESHOLD:
        # cv2.imwrite("motion.jpg", fgmask)
        waitUntilMotionCompletes()
        frame = video_capture_thread.get_frame()
        frame = perspective_transform(frame, pts1)
        fgmask = move_fgbg.apply(frame, learningRate=0.0)
        if fgmask.mean() >= 10.0:
            ret, fgmask = cv2.threshold(fgmask, 250, 255, cv2.THRESH_BINARY)
            # print("Move mean " + str(fgmask.mean()))
            if fgmask.mean() >= MAX_MOVE_MEAN:
                fgmask = np.zeros(fgmask.shape, dtype=np.uint8)
        motion_fgbg.apply(frame)
        move_fgbg.apply(frame, learningRate=1.0)
        last_frame = stabilize_background_subtractors()
        previous_frame = previous_frame_queue[0]
        for _ in range(move_search_depth):
            if not game.is_light_change(last_frame) and game.register_move(fgmask, previous_frame, last_frame):
                pass
                # cv2.imwrite(game.executed_moves[-1] + " frame.jpg", last_frame)
                # cv2.imwrite(game.executed_moves[-1] + " mask.jpg", fgmask)
                # cv2.imwrite(game.executed_moves[-1] + " background.jpg", previous_frame)
            else:
                break
                # import uuid
                # id = str(uuid.uuid1())
                # cv2.imwrite(id + "frame_fail.jpg", last_frame)
                # cv2.imwrite(id + "mask_fail.jpg", fgmask)
                # cv2.imwrite(id + "background_fail.jpg", previous_frame)
        previous_frame_queue = deque(maxlen=10)
        previous_frame_queue.append(last_frame)
    else:
        move_fgbg.apply(frame)
        previous_frame_queue.append(frame)
cv2.destroyAllWindows()
time.sleep(2)