# mower-ng/mower/solvers/operator.py
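"""练度识别 (operator stats recognition): swipe through the in-game operator
detail pages and record each operator's name, potential, elite rank, level and
skill levels."""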


from datetime import datetime, timedelta
from difflib import SequenceMatcher

import cv2
import numpy as np

from mower.data import agent_list
from mower.models import Digtal
from mower.solvers.navigation.utils import generate_name
from mower.utils import config
from mower.utils.graph import SceneGraphSolver
from mower.utils.image import cropimg, loadres, thres2
from mower.utils.log import logger
from mower.utils.rapidocr import engine
from mower.utils.recognize import Scene
from mower.utils.vector import va
profession_list = [
    "PIONEER",
    "WARRIOR",
    "TANK",
    "SNIPER",
    "CASTER",
    "MEDIC",
    "SUPPORT",
    "SPECIAL",
]

templates = {}
for p in profession_list:
    templates[p] = {}

ekernel = np.ones((2, 2), np.uint8)
dkernel = np.ones((5, 5), np.uint8)

# Pre-render each operator's English name as a matching template (with a
# dilated mask), grouped by profession.
for name, data in agent_list.items():
    eng = data["name"]
    tpl = generate_name(eng, font_size=49, width=590, style="dark")
    msk = cv2.dilate(tpl, dkernel, iterations=1)
    tpl = cv2.erode(tpl, ekernel, iterations=1)
    templates[data["profession"]][name] = tpl, msk, eng


class OperatorSolver(SceneGraphSolver):
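    """Collect potential, elite rank, level and skill levels for every operator
    into ``self.operator_data``, stopping once the list stops advancing."""
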
    def run(self):
        self.last_operator = ""
        self.repeat_count = 2
        self.wait = 0.6
        self.in_progress = False
        self.operator_data = {}
        logger.info("Start: 练度识别")
        return super().run()

    def profession(self):
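        """Return the profession of the currently shown operator by template
        matching the profession icon."""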
        img = cropimg(config.recog.gray, ((35, 900), (155, 1015)))
        max_score, max_profession = 0, None
        for p in profession_list:
            tpl = loadres(f"operator/{p}", True)
            result = cv2.matchTemplate(img, tpl, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
            if max_val > max_score:
                max_score, max_profession = max_val, p
        return max_profession

    def operator_name(self):
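        """Recognize the displayed operator's name.

        OCR the name region first; if the result is long enough, fuzzy-match it
        against the English names, otherwise fall back to masked template
        matching of the pre-rendered name images.
        """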
        img = thres2(config.recog.gray, 200)
        img = cropimg(img, ((20, 680), (630, 740)))
        profession = self.profession()
        ocr_result = engine(img, use_det=False, use_cls=False)[0][0][0]
        logger.debug(f"{profession=} {ocr_result=}")
        ocr_result = ocr_result.replace(".", "").strip()
        if len(ocr_result) >= 8:
            # A long OCR result is reliable enough to fuzzy-match against the
            # English names of this profession
            max_score, max_name = 0, ""
            for name, data in templates[profession].items():
                eng = data[-1]
                similarity = SequenceMatcher(None, ocr_result, eng).ratio()
                if similarity > 0.5:
                    logger.debug(f"{name=} {eng=} {similarity=}")
                    if similarity > max_score:
                        max_score, max_name = similarity, name
            return max_name
        # Short OCR result: fall back to masked template matching
        min_score, min_name, min_eng = 1, "", ""
        for name, (tpl, msk, eng) in templates[profession].items():
            result = cv2.matchTemplate(img, tpl, cv2.TM_SQDIFF_NORMED, None, msk)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
            if min_val > 0.5:
                continue
            logger.debug(f"{name=} {min_val=} {min_loc=} {tpl.shape=}")
            if min_val < min_score and eng not in min_eng:
                min_score, min_name, min_eng = min_val, name, eng
            elif min_eng != "" and min_eng in eng:
                min_score, min_name, min_eng = min_val, name, eng
        return min_name

    def level(self):
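        """Read the operator's level from the level badge via digit recognition."""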
        img = thres2(config.recog.gray, 250)
        img = cropimg(img, ((1334, 175), (1456, 249)))
        scale = 25 / 73
        img = cv2.resize(img, None, None, scale, scale)
        return config.recog.num.number_int("secret_front", img=img)

    def elite(self):
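        """Return the elite rank (0, 1 or 2) by template matching the elite icon."""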
        img = cropimg(config.recog.gray, ((1320, 372), (1454, 451)))
        max_score, max_elite = 0, None
        for i in range(3):
            tpl = loadres(f"operator/elite{i}", True)
            result = cv2.matchTemplate(img, tpl, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
            if max_val > max_score:
                max_score, max_elite = max_val, i
        return max_elite

    def potential(self):
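        """Return the matched potential icon index (0-5) via template matching."""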
        img = cropimg(config.recog.img, ((1598, 372), (1732, 460)))
        max_score, max_potential = 0, None
        for i in range(6):
            tpl = loadres(f"operator/potential{i}")
            result = cv2.matchTemplate(img, tpl, cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
            if max_val > max_score:
                max_score, max_potential = max_val, i
        return max_potential

    def skill(self, elite):
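        """Return the level of each unlocked skill.

        Reads the shared skill rank (1-7) once, then, for elite 2 operators at
        rank 7, adds the per-slot mastery (M0-M3) to the rank.
        """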
        skill_data = []
        # Read the shared skill rank (1-7) from the skill level digit
        img = cropimg(config.recog.gray, ((1822, 542), (1844, 569)))
        img = thres2(img, 200)
        height = 18
        scale = 25 / height
        img = cv2.resize(img, None, None, scale, scale)
        min_score, rank = 1, None
        for i in range(1, 8):
            im = Digtal().secret_front[i]
            result = cv2.matchTemplate(img, im, cv2.TM_SQDIFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
            if min_val < min_score:
                min_score, rank = min_val, i
        skill_area = [
            ((1322, 524), (1453, 623)),
            ((1453, 524), (1585, 623)),
            ((1585, 524), (1717, 623)),
        ]
        w, h = 31, 30
        left_list = [1326, 1458, 1589]
        top = 502
        for i, area in enumerate(skill_area):
            # Stop at the first empty or locked skill slot
            if self.template_match("operator/no_skill", area)[0] >= 0.8:
                break
            if self.template_match("operator/skill_locked", area)[0] >= 0.8:
                break
            # Mastery is only shown for elite 2 operators at skill rank 7
            if rank < 7 or elite < 2:
                skill_data.append(rank)
                continue
            left = left_list[i]
            top_left = left, top
            img = cropimg(config.recog.gray, (top_left, va(top_left, (w, h))))
            min_score, min_mastery = 1, None
            for j in range(4):
                tpl = loadres(f"operator/mastery{j}", True)
                result = cv2.matchTemplate(img, tpl, cv2.TM_SQDIFF_NORMED)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
                if min_val < min_score:
                    min_score, min_mastery = min_val, j
            skill_data.append(rank + min_mastery)
        return skill_data

    def transition(self):
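        """From the management screen, enter the operator details; on the
        details screen, record the current operator's stats and swipe to the
        next one."""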
        if (scene := self.scene()) == Scene.OPERATOR_MANAGEMENT:
            self.tap((143, 354))
        elif scene == Scene.OPERATOR_DETAILS:
            self.in_progress = True
            self.swipe((1242, 747), (-800, 0), interval=0)
            start_time = datetime.now()
            name = self.operator_name()
            potential = self.potential()
            elite = self.elite()
            level = self.level()
            skill = self.skill(elite)
            logger.info(f"{name=} {potential=} {elite=} {level=} {skill=}")
            self.operator_data[name] = {
                "potential": potential,
                "elite": elite,
                "level": level,
                "skill": skill,
            }
            if name == self.last_operator:
                # The same operator keeps appearing: the end of the list is reached
                self.repeat_count -= 1
                if self.repeat_count <= 0:
                    return True
            else:
                self.repeat_count = 2
                self.last_operator = name
            # Keep at least self.wait seconds between swipes
            remain_time = start_time - datetime.now() + timedelta(seconds=self.wait)
            remain = remain_time.total_seconds()
            if remain > 0:
                self.sleep(remain)
        else:
            # Unexpected scene: slow down future swipes and navigate back
            if self.in_progress and scene != Scene.CONNECTING:
                self.wait += 0.01
            self.scene_graph_step(Scene.OPERATOR_MANAGEMENT)