Compare commits
7 commits: cee44523c6 ... 167f1da9f6

SHA1:
167f1da9f6
aa841e6a05
a9676965a9
6dfa1923bf
eb149b0fbf
2454541e06
44782a3117

51 changed files with 1383 additions and 1021 deletions
BIN depot_test/output/matches_knn_only.png (Stored with Git LFS, Normal file); binary file not shown.
BIN depot_test/stitched_image_multi.png (Stored with Git LFS, Normal file); binary file not shown.
325 depot_test/仓库识别MobileNetV3 KNN copy.py (Normal file)
@@ -0,0 +1,325 @@
import glob
|
||||
import os
|
||||
import re # 用于解析文件名中的标签
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.optim as optim
|
||||
from PIL import Image
|
||||
from torch.utils.data import DataLoader, Dataset
|
||||
from torchvision import models, transforms
|
||||
from torchvision.datasets import ImageFolder
|
||||
from tqdm import tqdm # 用于显示进度条
|
||||
|
||||
# --- 配置参数 ---
|
||||
TRAIN_DIR = "训练集"
|
||||
VAL_TEST_DIR = "测试集" # 根据你的描述,验证集和测试集是同一个目录,文件格式相同
|
||||
IMAGE_SIZE = 224 # MobileNetV3 的标准输入大小
|
||||
BATCH_SIZE = 32
|
||||
NUM_EPOCHS = 20 # 可以根据需要调整
|
||||
LEARNING_RATE = 0.001 # 初始学习率
|
||||
SAVE_MODEL_PATH = "mobilenetv3_small_finetuned.pth"
|
||||
|
||||
# Automatically detect the device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
||||
print(f"使用设备: {device}")
|
||||
|
||||
# --- 数据预处理和增强 ---
|
||||
# ImageNet 标准均值和标准差
|
||||
mean = [0.485, 0.456, 0.406]
|
||||
std = [0.229, 0.224, 0.225]
|
||||
|
||||
# 训练集数据增强和预处理
|
||||
train_transforms = transforms.Compose(
|
||||
[
|
||||
transforms.RandomResizedCrop(IMAGE_SIZE), # 随机裁剪并缩放
|
||||
transforms.RandomHorizontalFlip(), # 随机水平翻转
|
||||
transforms.ToTensor(), # 转换为 Tensor
|
||||
transforms.Normalize(mean, std), # 标准化
|
||||
]
|
||||
)
|
||||
|
||||
# 验证/测试集数据预处理 (不需要数据增强,只需要中心裁剪和标准化)
|
||||
val_test_transforms = transforms.Compose(
|
||||
[
|
||||
transforms.Resize(int(IMAGE_SIZE * 256 / 224)), # 缩放到较大尺寸
|
||||
transforms.CenterCrop(IMAGE_SIZE), # 中心裁剪到目标尺寸
|
||||
transforms.ToTensor(), # 转换为 Tensor
|
||||
transforms.Normalize(mean, std), # 标准化
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
# --- 自定义测试集 Dataset ---
|
||||
# 需要一个自定义 Dataset 来处理 "测试集/{idx}_{label}.png" 这种文件命名格式
|
||||
class CustomValTestDataset(Dataset):
|
||||
def __init__(self, root_dir, class_to_idx, transform=None):
|
||||
"""
|
||||
Args:
|
||||
root_dir (string): 数据集根目录 (e.g., "测试集").
|
||||
class_to_idx (dict): 从类别名称到索引的映射,与训练集一致。
|
||||
transform (callable, optional): 应用于图像的转换.
|
||||
"""
|
||||
self.root_dir = root_dir
|
||||
self.transform = transform
|
||||
self.class_to_idx = class_to_idx
|
||||
self.idx_to_class = {v: k for k, v in class_to_idx.items()}
|
||||
self.image_files = []
|
||||
self.labels = []
|
||||
|
||||
# 遍历目录下的所有png文件
|
||||
filepaths = glob.glob(os.path.join(root_dir, "*.png"))
|
||||
|
||||
# 解析文件名,提取标签
|
||||
pattern = re.compile(r"^\d+_([^_]+)\.png$") # 匹配 数字_标签.png
|
||||
|
||||
for filepath in filepaths:
|
||||
filename = os.path.basename(filepath)
|
||||
match = pattern.match(filename)
|
||||
if match:
|
||||
label_name = match.group(1)
|
||||
if label_name in self.class_to_idx:
|
||||
self.image_files.append(filepath)
|
||||
self.labels.append(self.class_to_idx[label_name])
|
||||
else:
|
||||
print(
|
||||
f"警告: 文件 '{filename}' 中的标签 '{label_name}' 不在训练集的类别中,将跳过。"
|
||||
)
|
||||
|
||||
print(f"加载了 {len(self.image_files)} 张验证/测试图片。")
|
||||
|
||||
def __len__(self):
|
||||
return len(self.image_files)
|
||||
|
||||
def __getitem__(self, idx):
|
||||
if torch.is_tensor(idx):
|
||||
idx = idx.tolist()
|
||||
|
||||
img_path = self.image_files[idx]
|
||||
label = self.labels[idx]
|
||||
|
||||
# 打开图像,确保是 RGB (处理可能的灰度图)
|
||||
img = Image.open(img_path).convert("RGB")
|
||||
|
||||
if self.transform:
|
||||
img = self.transform(img)
|
||||
|
||||
return img, label
|
||||
|
||||
|
||||
# --- 加载数据 ---
|
||||
# 使用 ImageFolder 加载训练集,它会自动从目录名解析类别
|
||||
if not os.path.exists(TRAIN_DIR):
|
||||
print(
|
||||
f"错误: 训练集目录 '{TRAIN_DIR}' 不存在。请创建该目录并放入分类好的图片子目录。"
|
||||
)
|
||||
exit()
|
||||
|
||||
if not os.path.exists(VAL_TEST_DIR):
|
||||
print(f"错误: 验证/测试集目录 '{VAL_TEST_DIR}' 不存在。请创建该目录并放入图片。")
|
||||
exit()
|
||||
|
||||
|
||||
train_dataset = ImageFolder(TRAIN_DIR, transform=train_transforms)
|
||||
num_classes = len(train_dataset.classes)
|
||||
class_to_idx = train_dataset.class_to_idx # 获取类别到索引的映射
|
||||
|
||||
print(f"从训练集检测到 {num_classes} 个类别: {train_dataset.classes}")
|
||||
|
||||
# 使用自定义 Dataset 加载验证/测试集
|
||||
val_test_dataset = CustomValTestDataset(
|
||||
VAL_TEST_DIR, class_to_idx, transform=val_test_transforms
|
||||
)
|
||||
|
||||
# 创建 DataLoader
|
||||
train_loader = DataLoader(
|
||||
train_dataset, batch_size=BATCH_SIZE, shuffle=True
|
||||
) # num_workers 根据你的机器性能调整
|
||||
val_test_loader = DataLoader(val_test_dataset, batch_size=BATCH_SIZE, shuffle=False)
|
||||
|
||||
# --- 加载预训练的 MobileNetV3-Small 模型 ---
|
||||
# 使用 weights 参数来指定预训练权重
|
||||
# MobileNetV3_Small_Weights.IMAGENET1K_V1 是在 ImageNet 上预训练的权重
|
||||
try:
|
||||
model = models.mobilenet_v3_small(
|
||||
weights=models.MobileNet_V3_Small_Weights.IMAGENET1K_V1
|
||||
)
|
||||
print("成功加载预训练的 MobileNetV3-Small 模型 (ImageNet weights)。")
|
||||
except Exception as e:
|
||||
print(f"加载预训练模型失败: {e}")
|
||||
print("尝试加载不带权重的模型...")
|
||||
model = models.mobilenet_v3_small(weights=None)
|
||||
|
||||
|
||||
# --- 修改全连接层以匹配新的类别数量 ---
|
||||
# MobileNetV3 的分类器是 model.classifier
|
||||
# 最后一个线性层是 classifier[-1]
|
||||
num_ftrs = model.classifier[-1].in_features
|
||||
# 替换掉原来的全连接层
|
||||
model.classifier[-1] = nn.Linear(num_ftrs, num_classes)
|
||||
|
||||
model = model.to(device)
|
||||
|
||||
# --- 定义损失函数和优化器 ---
|
||||
criterion = nn.CrossEntropyLoss() # 交叉熵损失适用于分类问题
|
||||
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE) # Adam 优化器
|
||||
|
||||
# 可选:学习率调度器,帮助调整学习率
|
||||
# scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1) # 每7个epoch降低学习率
|
||||
|
||||
# --- 训练和评估函数 ---
|
||||
|
||||
|
||||
def train_epoch(model, train_loader, criterion, optimizer, device):
|
||||
model.train() # 设置模型为训练模式
|
||||
running_loss = 0.0
|
||||
correct_predictions = 0
|
||||
total_samples = 0
|
||||
|
||||
# 使用 tqdm 显示进度条
|
||||
for inputs, labels in tqdm(train_loader, desc="训练中"):
|
||||
inputs, labels = inputs.to(device), labels.to(device)
|
||||
|
||||
# 梯度清零
|
||||
optimizer.zero_grad()
|
||||
|
||||
# 前向传播
|
||||
outputs = model(inputs)
|
||||
loss = criterion(outputs, labels)
|
||||
|
||||
# 反向传播和优化
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
|
||||
# 统计
|
||||
running_loss += loss.item() * inputs.size(0) # 累加 batch loss * batch size
|
||||
_, predicted = torch.max(outputs, 1) # 获取预测结果
|
||||
correct_predictions += (predicted == labels).sum().item()
|
||||
total_samples += labels.size(0)
|
||||
|
||||
epoch_loss = running_loss / total_samples
|
||||
epoch_accuracy = correct_predictions / total_samples
|
||||
return epoch_loss, epoch_accuracy
|
||||
|
||||
|
||||
def evaluate(model, data_loader, criterion, device, desc="评估中"):
|
||||
model.eval() # 设置模型为评估模式
|
||||
running_loss = 0.0
|
||||
correct_predictions = 0
|
||||
total_samples = 0
|
||||
|
||||
# 在评估阶段不计算梯度
|
||||
with torch.no_grad():
|
||||
# 使用 tqdm 显示进度条
|
||||
for inputs, labels in tqdm(data_loader, desc=desc):
|
||||
inputs, labels = inputs.to(device), labels.to(device)
|
||||
|
||||
# 前向传播
|
||||
outputs = model(inputs)
|
||||
loss = criterion(outputs, labels)
|
||||
|
||||
# 统计
|
||||
running_loss += loss.item() * inputs.size(0)
|
||||
_, predicted = torch.max(outputs, 1)
|
||||
correct_predictions += (predicted == labels).sum().item()
|
||||
total_samples += labels.size(0)
|
||||
|
||||
epoch_loss = running_loss / total_samples
|
||||
epoch_accuracy = correct_predictions / total_samples
|
||||
return epoch_loss, epoch_accuracy
|
||||
|
||||
|
||||
# --- 训练循环 ---
|
||||
best_val_accuracy = 0.0
|
||||
|
||||
print("\n开始训练...")
|
||||
for epoch in range(NUM_EPOCHS):
|
||||
print(f"\n--- Epoch {epoch + 1}/{NUM_EPOCHS} ---")
|
||||
|
||||
# 训练阶段
|
||||
train_loss, train_accuracy = train_epoch(
|
||||
model, train_loader, criterion, optimizer, device
|
||||
)
|
||||
print(
|
||||
f"Epoch {epoch + 1} 训练 Loss: {train_loss:.4f}, 准确率: {train_accuracy:.4f}"
|
||||
)
|
||||
|
||||
# 可选:学习率调度
|
||||
# if scheduler is not None:
|
||||
# scheduler.step()
|
||||
|
||||
# 验证/测试阶段
|
||||
val_loss, val_accuracy = evaluate(
|
||||
model, val_test_loader, criterion, device, desc="验证/测试中"
|
||||
)
|
||||
print(
|
||||
f"Epoch {epoch + 1} 验证/测试 Loss: {val_loss:.4f}, 准确率: {val_accuracy:.4f}"
|
||||
)
|
||||
|
||||
# 保存最优模型
|
||||
if val_accuracy > best_val_accuracy:
|
||||
best_val_accuracy = val_accuracy
|
||||
torch.save(model.state_dict(), SAVE_MODEL_PATH)
|
||||
print(
|
||||
f"保存了验证/测试集上最优的模型 (准确率: {best_val_accuracy:.4f}) 到 {SAVE_MODEL_PATH}"
|
||||
)
|
||||
|
||||
print("\n训练完成!")
|
||||
print(f"在验证/测试集上的最高准确率: {best_val_accuracy:.4f}")
|
||||
|
||||
# --- 可选: 加载并测试最优模型 ---
|
||||
# 加载保存的最优模型进行最终测试 (如果验证/测试集是同一个,这就是最终结果)
|
||||
print(f"\n加载最优模型 '{SAVE_MODEL_PATH}' 进行最终评估...")
|
||||
loaded_model = models.mobilenet_v3_small(weights=None) # 先加载一个空的模型结构
|
||||
loaded_model.classifier[-1] = nn.Linear(
|
||||
loaded_model.classifier[-1].in_features, num_classes
|
||||
) # 修改分类器
|
||||
loaded_model.load_state_dict(
|
||||
torch.load(SAVE_MODEL_PATH, map_location=device)
|
||||
) # 加载权重
|
||||
loaded_model = loaded_model.to(device)
|
||||
|
||||
final_test_loss, final_test_accuracy = evaluate(
|
||||
loaded_model, val_test_loader, criterion, device, desc="最终测试中"
|
||||
)
|
||||
print(
|
||||
f"\n最终测试 Loss: {final_test_loss:.4f}, 最终测试准确率: {final_test_accuracy:.4f}"
|
||||
)
|
||||
|
||||
|
||||
# --- 可选: 预测单张图片 ---
|
||||
|
||||
|
||||
# 假设你想预测一张名为 '测试集/some_image_X_label.png' 的图片
|
||||
def predict_single_image(image_path, model, class_to_idx, device, transform):
|
||||
model.eval()
|
||||
idx_to_class = {v: k for k, v in class_to_idx.items()}
|
||||
|
||||
try:
|
||||
img = Image.open(image_path).convert("RGB")
|
||||
img = transform(img).unsqueeze(0).to(device) # 添加 batch 维度并移动到设备
|
||||
|
||||
with torch.no_grad():
|
||||
outputs = model(img)
|
||||
probabilities = torch.softmax(outputs, dim=1)[0] # 获取概率分布
|
||||
_, predicted_idx = torch.max(probabilities, 0) # 获取最高概率的索引
|
||||
predicted_label = idx_to_class[predicted_idx.item()]
|
||||
confidence = probabilities[predicted_idx].item()
|
||||
|
||||
print(f"\n预测图片: {image_path}")
|
||||
print(f"预测类别: {predicted_label}, 置信度: {confidence:.4f}")
|
||||
return predicted_label, confidence
|
||||
|
||||
except FileNotFoundError:
|
||||
print(f"错误: 图片文件 '{image_path}' 未找到。")
|
||||
return None, None
|
||||
except Exception as e:
|
||||
print(f"预测图片时发生错误: {e}")
|
||||
return None, None
|
||||
|
||||
|
||||
# Example prediction (replace with an actual file path from your test set)
example_image_path = r"测试集\27_基础作战记录.png"
|
||||
predict_single_image(
|
||||
example_image_path, loaded_model, class_to_idx, device, val_test_transforms
|
||||
)
|
378 depot_test/仓库识别MobileNetV3 KNN.py (Normal file)
@@ -0,0 +1,378 @@
import json
|
||||
import lzma
|
||||
import os
|
||||
import pickle
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import torch
|
||||
import torchvision.transforms as transforms
|
||||
from PIL import Image, ImageDraw, ImageFont, ImageOps
|
||||
from sklearn.neighbors import KNeighborsClassifier
|
||||
from torchvision import models
|
||||
|
||||
CROP_SIZE = 130
|
||||
BORDER = 26
|
||||
size = CROP_SIZE * 2 - BORDER * 2
|
||||
|
||||
|
||||
# 定义特征提取器
|
||||
model = models.mobilenet_v3_small(weights="DEFAULT")
|
||||
|
||||
features_part = model.features
|
||||
avgpool = torch.nn.AdaptiveAvgPool2d(1)
|
||||
classifier_part_excluding_last = torch.nn.Sequential(
|
||||
*list(model.classifier.children())[:-1]
|
||||
)
|
||||
|
||||
feature_extractor = torch.nn.Sequential(
|
||||
features_part,
|
||||
avgpool,
|
||||
torch.nn.Flatten(start_dim=1),
|
||||
classifier_part_excluding_last,
|
||||
)
|
||||
feature_extractor.eval() # 切换到评估模式
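A quick shape check clarifies what this extractor produces (a minimal sketch, assuming the standard torchvision MobileNetV3-Small head, whose penultimate Linear layer outputs 1024 features):

# Sanity-check sketch: the truncated network should yield one 1024-dimensional embedding per image.
import torch

with torch.no_grad():
    dummy = torch.zeros(1, 3, 224, 224)   # one dummy RGB image
    embedding = feature_extractor(dummy)  # feature_extractor is defined above
print(embedding.shape)                    # expected: torch.Size([1, 1024])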
|
||||
|
||||
|
||||
def 提取特征点(模板):
|
||||
"""使用MobileNetV3提取特征 (PyTorch版)"""
|
||||
# 将输入图像从BGR转换为RGB
|
||||
img_rgb = cv2.cvtColor(模板, cv2.COLOR_BGR2RGB)
|
||||
|
||||
# 定义图像预处理流程
|
||||
preprocess = transforms.Compose(
|
||||
[
|
||||
transforms.ToPILImage(), # 转换为PIL图像
|
||||
transforms.Resize(250),  # resize the shorter side to 250
|
||||
transforms.ToTensor(), # 转换为Tensor
|
||||
transforms.Normalize( # 归一化
|
||||
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
# 预处理图像
|
||||
img_tensor = preprocess(img_rgb)
|
||||
img_tensor = img_tensor.unsqueeze(0) # 增加batch维度
|
||||
|
||||
# 提取特征
|
||||
with torch.no_grad():
|
||||
features = feature_extractor(img_tensor)
|
||||
|
||||
# 将特征展平为一维
|
||||
features = features.flatten().numpy()
|
||||
|
||||
return features
|
||||
|
||||
|
||||
class DepotMatcher:
|
||||
def __init__(
|
||||
self,
|
||||
ref_table_json="./ArknightsGameData/zh_CN/gamedata/excel/item_table.json",
|
||||
icon_dir="./ArknightsResource/items/",
|
||||
ref_dir="depot_test/output/test/origin",
|
||||
roi_dir="depot_test/output/test/result",
|
||||
img_path=r"depot_test\stitched_image_multi.png",
|
||||
):
|
||||
# 初始化路径配置
|
||||
self.REF_DIR = ref_dir
|
||||
self.ROI_DIR = roi_dir
|
||||
self.IMG_PATH = img_path
|
||||
self.REF_TABLE_JSON = ref_table_json
|
||||
self.ICON_DIR = icon_dir
|
||||
|
||||
# 初始化算法参数
|
||||
self.HOUGH_PARAMS = dict(
|
||||
dp=5, minDist=230, param1=50, param2=30, minRadius=90, maxRadius=100
|
||||
)
|
||||
self.CROP_SIZE = 130
|
||||
self.BORDER = 26
|
||||
|
||||
# 运行时数据存储
|
||||
self.refs = None
|
||||
self.rois = []
|
||||
self.knn_results = []
|
||||
self.knn_model = None
|
||||
|
||||
def load_references(self):
|
||||
"""加载物品图标参考图(保留彩色)"""
|
||||
data = json.load(open(self.REF_TABLE_JSON, encoding="utf-8"))
|
||||
self.refs = {}
|
||||
size = self.CROP_SIZE * 2 - self.BORDER * 2
|
||||
|
||||
# 首先收集所有带有sortId的物品
|
||||
items_with_sort = []
|
||||
for item in data.get("items", {}).values():
|
||||
if item.get("classifyType") not in {"NORMAL", "CONSUME", "MATERIAL"}:
|
||||
continue
|
||||
|
||||
path = os.path.join(self.ICON_DIR, f"{item['iconId']}.png")
|
||||
if not os.path.exists(path):
|
||||
continue
|
||||
|
||||
# 保留彩色图像
|
||||
im = Image.open(path).resize((size, size))
|
||||
items_with_sort.append(
|
||||
{
|
||||
"name": item["name"],
|
||||
"array": np.array(im),
|
||||
"sortId": item.get("sortId", 0),
|
||||
}
|
||||
)
|
||||
|
||||
# 按sortId排序
|
||||
items_with_sort.sort(key=lambda x: x["sortId"])
|
||||
|
||||
# 创建最终的refs字典
|
||||
for item in items_with_sort:
|
||||
self.refs[item["name"]] = item["array"]
|
||||
|
||||
print(f"已加载 {len(self.refs)} 个参考图 (按sortId排序)")
|
||||
# 保存训练集图像
|
||||
os.makedirs("训练集", exist_ok=True)
|
||||
for name, array in self.refs.items():
|
||||
os.makedirs(f"训练集/{name}", exist_ok=True)
|
||||
path = os.path.join(f"训练集/{name}", f"{name}.png")
|
||||
im = Image.fromarray(array)
|
||||
cropped_im = im.crop((50, 30, 160, 140))
|
||||
cropped_im.save(path)
|
||||
|
||||
return self
|
||||
|
||||
def _process_circle(self, idx, circle, img):
|
||||
"""处理单个圆形区域,返回彩色图像数据"""
|
||||
x, y, r = circle
|
||||
# 裁剪包含圆形的更大区域
|
||||
crop = img[
|
||||
max(0, y - self.CROP_SIZE) : min(img.shape[0], y + self.CROP_SIZE),
|
||||
max(0, x - self.CROP_SIZE) : min(img.shape[1], x + self.CROP_SIZE),
|
||||
]
|
||||
|
||||
# 提取核心的彩色ROI区域
|
||||
color_roi = crop[self.BORDER : -self.BORDER, self.BORDER : -self.BORDER]
|
||||
|
||||
# 提取用于匹配的彩色区域
|
||||
color_sec = color_roi
|
||||
|
||||
return idx, color_sec, color_roi
|
||||
|
||||
def detect_and_crop(self):
|
||||
"""检测并裁剪截图区域"""
|
||||
img = cv2.imread(self.IMG_PATH)
|
||||
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
||||
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, **self.HOUGH_PARAMS)
|
||||
|
||||
# 处理检测到的圆形
|
||||
circles = np.round(circles[0]).astype(int)
|
||||
circles = sorted(circles, key=lambda c: (c[0], c[1])) # 按坐标排序
|
||||
|
||||
self.rois = []
|
||||
for idx, circle in enumerate(circles):
|
||||
result = self._process_circle(idx, circle, img)
|
||||
self.rois.append(result)
|
||||
|
||||
return self
|
||||
|
||||
def 训练并保存knn模型(self, images, labels, filename):
|
||||
"""训练并保存KNN模型"""
|
||||
knn_classifier = KNeighborsClassifier(
|
||||
weights="distance", n_neighbors=1, n_jobs=1
|
||||
)
|
||||
knn_classifier.fit(images, labels)
|
||||
|
||||
with lzma.open(filename, "wb") as f:
|
||||
pickle.dump(knn_classifier, f)
|
||||
|
||||
return knn_classifier
|
||||
|
||||
def 训练knn模型(self, 模型保存路径="depot_knn_model.xz"):
|
||||
"""训练并保存KNN模型(使用彩色图像)"""
|
||||
|
||||
# 准备训练数据
|
||||
images = []
|
||||
labels = []
|
||||
|
||||
for name, img_array in self.refs.items():
|
||||
features = 提取特征点(img_array)
|
||||
images.append(features)
|
||||
labels.append(name)
|
||||
|
||||
# 训练模型
|
||||
self.knn_model = self.训练并保存knn模型(images, labels, 模型保存路径)
|
||||
print(f"KNN模型训练完成,已保存到: {模型保存路径}")
|
||||
return self
|
||||
|
||||
def 使用knn预测(self, 测试图像):
|
||||
features = 提取特征点(测试图像)
|
||||
|
||||
# 预测
|
||||
预测结果 = self.knn_model.predict([features])
|
||||
|
||||
return 预测结果[0]
|
||||
|
||||
def match_items_knn_only(
|
||||
self,
|
||||
knn_model_path="depot_knn_model.xz",
|
||||
):
|
||||
"""仅使用KNN方法进行匹配"""
|
||||
self.knn_results = []
|
||||
newstart = None
|
||||
with lzma.open(knn_model_path, "rb") as f:
|
||||
self.knn_model = pickle.load(f)
|
||||
|
||||
os.makedirs("测试集", exist_ok=True)
|
||||
|
||||
for idx, color_sec_np, _ in self.rois:
|
||||
# KNN prediction: 提取特征点 expects a 3-channel image (it converts BGR to RGB itself),
# so pass the color ROI directly rather than a single-channel grayscale copy
knn_name = self.使用knn预测(color_sec_np)
|
||||
|
||||
self.knn_results.append((idx, knn_name))
|
||||
os.makedirs(f"测试集/{knn_name}", exist_ok=True)
|
||||
Image.fromarray(cv2.cvtColor(color_sec_np, cv2.COLOR_BGR2RGB)).crop(
|
||||
(50, 30, 160, 140)
|
||||
).save(os.path.join(f"测试集/{knn_name}", f"{idx}_{knn_name}.png"))
|
||||
# 更新newstart逻辑
|
||||
newstart = knn_name
|
||||
|
||||
print(f"ROI {idx}: Hog+Knn={knn_name}, newstart={newstart}")
|
||||
|
||||
return self
|
||||
|
||||
def display_results(self):
|
||||
"""可视化匹配结果"""
|
||||
ROW_LIMIT = 9
|
||||
|
||||
# 获取一个参考图像的尺寸作为空白图像的基础
|
||||
blank_ref_np = next(iter(self.refs.values()))
|
||||
blank_img_pil = Image.new(
|
||||
"RGB", (blank_ref_np.shape[1], blank_ref_np.shape[0]), (200, 200, 200)
|
||||
)
|
||||
|
||||
combined_images = []
|
||||
current_row_images = []
|
||||
current_row_width = 0
|
||||
max_row_height = 0
|
||||
|
||||
for idx, color_sec_np, color_roi_data in self.rois:
|
||||
color_roi_data = Image.fromarray(
|
||||
cv2.cvtColor(color_roi_data, cv2.COLOR_BGR2RGB)
|
||||
)
|
||||
color_sec_np = Image.fromarray(
|
||||
cv2.cvtColor(color_sec_np, cv2.COLOR_BGR2RGB)
|
||||
)
|
||||
|
||||
# 获取KNN匹配结果
|
||||
k_res_details = next(
|
||||
(d for d in getattr(self, "knn_results", []) if d[0] == idx), None
|
||||
)
|
||||
k_res_name = k_res_details[1] if k_res_details else None
|
||||
k_ref_img = (
|
||||
Image.fromarray(self.refs[k_res_name]).convert("RGB")
|
||||
if k_res_name and k_res_name in self.refs
|
||||
else blank_img_pil.copy()
|
||||
)
|
||||
|
||||
# 计算组合尺寸
|
||||
combined_width = color_roi_data.width + color_sec_np.width + k_ref_img.width
|
||||
|
||||
combined_height = max(
|
||||
color_roi_data.height,
|
||||
color_sec_np.height,
|
||||
k_ref_img.height,
|
||||
)
|
||||
|
||||
# 创建组合图像
|
||||
combined = Image.new(
|
||||
"RGB", (combined_width, combined_height), (255, 255, 255)
|
||||
)
|
||||
x_offset = 0
|
||||
|
||||
# 粘贴各个部分
|
||||
combined.paste(color_roi_data, (x_offset, 0))
|
||||
x_offset += color_roi_data.width
|
||||
|
||||
combined.paste(color_sec_np, (x_offset, 0))
|
||||
x_offset += color_sec_np.width
|
||||
|
||||
combined.paste(k_ref_img, (x_offset, 0))
|
||||
x_offset += k_ref_img.width
|
||||
|
||||
# 添加标注
|
||||
draw = ImageDraw.Draw(combined)
|
||||
font = ImageFont.truetype("msyh.ttc", 16)
|
||||
|
||||
label = f"ROI {idx}\nHog+Knn: {k_res_name or 'None'}"
|
||||
|
||||
text_color = (0, 0, 0)
|
||||
|
||||
draw.text(
|
||||
(color_roi_data.width, color_sec_np.height),
|
||||
label,
|
||||
fill=text_color,
|
||||
font=font,
|
||||
)
|
||||
|
||||
# 添加边框
|
||||
combined_bordered = ImageOps.expand(combined, border=2, fill=(0, 0, 0))
|
||||
current_row_images.append(combined_bordered)
|
||||
current_row_width += combined_bordered.width
|
||||
max_row_height = max(max_row_height, combined_bordered.height)
|
||||
|
||||
# 检查是否需要换行
|
||||
if len(current_row_images) == ROW_LIMIT:
|
||||
row_img = Image.new(
|
||||
"RGB", (current_row_width, max_row_height), (255, 255, 255)
|
||||
)
|
||||
x = 0
|
||||
for img in current_row_images:
|
||||
row_img.paste(img, (x, 0))
|
||||
x += img.width
|
||||
combined_images.append(row_img)
|
||||
current_row_images = []
|
||||
current_row_width = 0
|
||||
max_row_height = 0
|
||||
|
||||
# 处理最后一行
|
||||
if current_row_images:
|
||||
row_img = Image.new(
|
||||
"RGB", (current_row_width, max_row_height), (255, 255, 255)
|
||||
)
|
||||
x = 0
|
||||
for img in current_row_images:
|
||||
row_img.paste(img, (x, 0))
|
||||
x += img.width
|
||||
combined_images.append(row_img)
|
||||
|
||||
# 生成最终图像
|
||||
if combined_images:
|
||||
total_height = sum(img.height for img in combined_images)
|
||||
max_width = max(img.width for img in combined_images)
|
||||
final_img = Image.new("RGB", (max_width, total_height), (255, 255, 255))
|
||||
|
||||
y = 0
|
||||
for img in combined_images:
|
||||
final_img.paste(img, (0, y))
|
||||
y += img.height
|
||||
|
||||
output_path = "depot_test/output/matches_knn_only.png"
|
||||
os.makedirs(os.path.dirname(output_path), exist_ok=True)
|
||||
final_img.save(output_path)
|
||||
print(f"结果图像已保存至: {output_path}")
|
||||
|
||||
return self
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# 使用示例
|
||||
matcher = DepotMatcher()
|
||||
matcher.load_references()
|
||||
matcher.训练knn模型()
|
||||
from datetime import datetime
|
||||
|
||||
now_time = datetime.now()
|
||||
|
||||
matcher.detect_and_crop()
|
||||
matcher.match_items_knn_only()
|
||||
print(datetime.now() - now_time)
|
||||
matcher.display_results()
|
|
@@ -1,188 +0,0 @@ (deleted file)
import json
|
||||
import os
|
||||
from datetime import datetime
|
||||
from multiprocessing import Pool
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from PIL import Image, ImageDraw, ImageFont, ImageOps
|
||||
from skimage.metrics import structural_similarity
|
||||
|
||||
now = datetime.now()
|
||||
# 配置路径
|
||||
REF_DIR = r"depot_test\output/test/origin"
|
||||
ROI_DIR = r"depot_test\output/test/result"
|
||||
IMG_PATH = r"depot_test\result_refined.png"
|
||||
REF_TABLE_JSON = "./ArknightsGameData/zh_CN/gamedata/excel/item_table.json"
|
||||
ICON_DIR = "./ArknightsResource/items/"
|
||||
|
||||
# SSIM阈值
|
||||
SSIM_THRESHOLD = 0.01
|
||||
|
||||
# 圆检测参数
|
||||
HOUGH_PARAMS = dict(
|
||||
dp=5, minDist=230, param1=50, param2=30, minRadius=90, maxRadius=100
|
||||
)
|
||||
CROP_SIZE = 130
|
||||
BORDER = 26
|
||||
SECONDARY_SLICE = (slice(30, 140), slice(50, 160))
|
||||
|
||||
|
||||
def load_references(table_json, icon_dir):
|
||||
data = json.load(open(table_json, encoding="utf-8"))
|
||||
refs = {}
|
||||
size = CROP_SIZE * 2 - BORDER * 2
|
||||
for item in data.get("items", {}).values():
|
||||
t = item.get("classifyType")
|
||||
if t not in {"NORMAL", "CONSUME", "MATERIAL"}:
|
||||
continue
|
||||
path = os.path.join(icon_dir, f"{item['iconId']}.png")
|
||||
if not os.path.exists(path):
|
||||
continue
|
||||
im = Image.open(path).resize((size, size)).crop((50, 30, 160, 140)).convert("L")
|
||||
refs[item["name"]] = np.array(im)
|
||||
print(f"已加载 {len(refs)} 个参考图,保存于 {REF_DIR}")
|
||||
return refs
|
||||
|
||||
|
||||
def process_circle(idx, circle, img, rois, size, dr):
|
||||
x, y, r = circle
|
||||
crop = img[
|
||||
max(0, y - CROP_SIZE) : min(img.shape[0], y + CROP_SIZE),
|
||||
max(0, x - CROP_SIZE) : min(img.shape[1], x + CROP_SIZE),
|
||||
]
|
||||
c = crop[BORDER:-BORDER, BORDER:-BORDER]
|
||||
sec = c[SECONDARY_SLICE[0], SECONDARY_SLICE[1]]
|
||||
gray_sec = cv2.cvtColor(sec, cv2.COLOR_BGR2GRAY)
|
||||
|
||||
# 保存 ROI 图像
|
||||
os.makedirs(ROI_DIR, exist_ok=True)
|
||||
roi_path = os.path.join(ROI_DIR, f"roi_{idx}.png")
|
||||
cv2.imwrite(roi_path, gray_sec)
|
||||
rois.append(gray_sec)
|
||||
return idx, gray_sec, roi_path
|
||||
|
||||
|
||||
def detect_and_crop(image_path):
|
||||
img = cv2.imread(image_path)
|
||||
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
||||
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, **HOUGH_PARAMS)
|
||||
if circles is None:
|
||||
print("未检测到圆形区域")
|
||||
return []
|
||||
circles = np.round(circles[0]).astype(int)
|
||||
rois = []
|
||||
size = CROP_SIZE * 2 - BORDER * 2
|
||||
dr = img.max() - img.min()
|
||||
|
||||
# 使用多进程加速
|
||||
with Pool() as pool:
|
||||
results = pool.starmap(
|
||||
process_circle,
|
||||
[(idx, circle, img, rois, size, dr) for idx, circle in enumerate(circles)],
|
||||
)
|
||||
|
||||
return [roi for idx, roi, path in results]
|
||||
|
||||
|
||||
def process_match(idx, roi, refs, thresh):
|
||||
best, score = "Unknown", -1
|
||||
dr = roi.max() - roi.min()
|
||||
for name, ref in refs.items():
|
||||
if roi.shape != ref.shape:
|
||||
continue
|
||||
s = structural_similarity(roi, ref, data_range=dr)
|
||||
if s > score:
|
||||
best, score = name, s
|
||||
if score >= thresh:
|
||||
return idx, best, score
|
||||
return idx, None, score
|
||||
|
||||
|
||||
def match_ssim(rois, refs, thresh=SSIM_THRESHOLD):
|
||||
from multiprocessing import Pool
|
||||
|
||||
args = [(idx, roi, refs, thresh) for idx, roi in enumerate(rois)]
|
||||
with Pool(processes=5) as pool:
|
||||
results = pool.starmap(process_match, args)
|
||||
|
||||
stats = {}
|
||||
match_idx = {}
|
||||
for idx, name, score in results:
|
||||
if name:
|
||||
stats[name] = stats.get(name, 0) + 1
|
||||
match_idx[idx] = name
|
||||
print(f"ROI {idx} 匹配结果: {name if name else 'Unknown'} (SSIM={score:.3f})")
|
||||
return stats, match_idx
|
||||
|
||||
|
||||
def display_matches(rois, match_idx, refs):
|
||||
ROW_LIMIT = 10 # 每行最多10个
|
||||
blank_ref = next(iter(refs.values())) # 取一个参考图的尺寸
|
||||
blank_img = Image.new(
|
||||
"RGB", (blank_ref.shape[1], blank_ref.shape[0]), (200, 200, 200)
|
||||
) # 灰色占位图
|
||||
|
||||
combined_images = []
|
||||
row_images = []
|
||||
|
||||
row_width = 0
|
||||
max_height = 0
|
||||
|
||||
for idx in range(len(rois)):
|
||||
roi_img = Image.fromarray(rois[idx]).convert("RGB")
|
||||
ref_name = match_idx.get(idx)
|
||||
if ref_name:
|
||||
ref_img = Image.fromarray(refs[ref_name]).convert("RGB")
|
||||
else:
|
||||
ref_img = blank_img.copy()
|
||||
|
||||
combined_width = roi_img.width + ref_img.width
|
||||
combined_height = max(roi_img.height, ref_img.height)
|
||||
|
||||
combined = Image.new("RGB", (combined_width, combined_height), (255, 255, 255))
|
||||
combined.paste(roi_img, (0, 0))
|
||||
combined.paste(ref_img, (roi_img.width, 0))
|
||||
|
||||
draw = ImageDraw.Draw(combined)
|
||||
|
||||
font = ImageFont.truetype("msyh.ttc", 20)
|
||||
|
||||
label = f"ROI {idx}: {ref_name if ref_name else 'Unknown'}"
|
||||
draw.text((5, 5), label, fill=(255, 0, 0), font=font)
|
||||
combined = ImageOps.expand(combined, border=2, fill=(0, 0, 0))
|
||||
row_images.append(combined)
|
||||
row_width += combined_width
|
||||
max_height = max(max_height, combined_height)
|
||||
|
||||
if len(row_images) == ROW_LIMIT or idx == len(rois) - 1:
|
||||
row_img = Image.new("RGB", (row_width, max_height), (255, 255, 255))
|
||||
x_offset = 0
|
||||
for img in row_images:
|
||||
row_img.paste(img, (x_offset, 0))
|
||||
x_offset += img.width
|
||||
combined_images.append(row_img)
|
||||
row_images = []
|
||||
row_width = 0
|
||||
max_height = 0
|
||||
|
||||
total_width = max(img.width for img in combined_images)
|
||||
total_height = sum(img.height for img in combined_images)
|
||||
|
||||
final_img = Image.new("RGB", (total_width, total_height), (255, 255, 255))
|
||||
y_offset = 0
|
||||
for img in combined_images:
|
||||
final_img.paste(img, (0, y_offset))
|
||||
y_offset += img.height
|
||||
|
||||
final_img.save("all_matches.png") # 或 final_img.save("all_matches.png")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
refs = load_references(REF_TABLE_JSON, ICON_DIR)
|
||||
rois = detect_and_crop(IMG_PATH)
|
||||
res, match_idx = match_ssim(rois, refs)
|
||||
|
||||
print("最终识别结果:", res)
|
||||
display_matches(rois, match_idx, refs)
|
||||
print(datetime.now() - now)
|
|
@@ -1,309 +0,0 @@ (deleted file)
import json
|
||||
import os
|
||||
from datetime import datetime
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from PIL import Image, ImageDraw, ImageFont, ImageOps
|
||||
from skimage.metrics import structural_similarity as ssim
|
||||
|
||||
# 配置路径
|
||||
REF_DIR = r"depot_test\output/test/origin"
|
||||
ROI_DIR = r"depot_test\output/test/result"
|
||||
IMG_PATH = r"depot_test\output\result_template.png"
|
||||
REF_TABLE_JSON = "./ArknightsGameData/zh_CN/gamedata/excel/item_table.json"
|
||||
ICON_DIR = "./ArknightsResource/items/"
|
||||
|
||||
# 参数
|
||||
HOUGH_PARAMS = dict(
|
||||
dp=5, minDist=230, param1=50, param2=30, minRadius=90, maxRadius=100
|
||||
)
|
||||
CROP_SIZE = 130
|
||||
BORDER = 26
|
||||
SECONDARY_SLICE = (slice(30, 140), slice(50, 160))
|
||||
|
||||
|
||||
def load_references(table_json, icon_dir):
|
||||
data = json.load(open(table_json, encoding="utf-8"))
|
||||
refs = {}
|
||||
size = CROP_SIZE * 2 - BORDER * 2
|
||||
for item in data.get("items", {}).values():
|
||||
if item.get("classifyType") not in {"NORMAL", "CONSUME", "MATERIAL"}:
|
||||
continue
|
||||
path = os.path.join(icon_dir, f"{item['iconId']}.png")
|
||||
if not os.path.exists(path):
|
||||
continue
|
||||
im = Image.open(path).resize((size, size)).crop((50, 30, 160, 140)).convert("L")
|
||||
refs[item["name"]] = np.array(im)
|
||||
print(f"已加载 {len(refs)} 个参考图")
|
||||
return refs
|
||||
|
||||
|
||||
def process_circle(idx, circle, img):
|
||||
x, y, r = circle
|
||||
crop = img[
|
||||
max(0, y - CROP_SIZE) : min(img.shape[0], y + CROP_SIZE),
|
||||
max(0, x - CROP_SIZE) : min(img.shape[1], x + CROP_SIZE),
|
||||
]
|
||||
# Save color ROI
|
||||
os.makedirs(ROI_DIR, exist_ok=True)
|
||||
color_roi_path = os.path.join(ROI_DIR, f"color_roi_{idx}.png")
|
||||
cv2.imwrite(color_roi_path, crop[BORDER:-BORDER, BORDER:-BORDER])
|
||||
|
||||
c = crop[BORDER:-BORDER, BORDER:-BORDER]
|
||||
sec = c[SECONDARY_SLICE[0], SECONDARY_SLICE[1]]
|
||||
gray_sec = cv2.cvtColor(sec, cv2.COLOR_BGR2GRAY)
|
||||
roi_path = os.path.join(ROI_DIR, f"roi_{idx}.png")
|
||||
cv2.imwrite(roi_path, gray_sec)
|
||||
return idx, gray_sec, roi_path, color_roi_path
|
||||
|
||||
|
||||
def detect_and_crop(image_path):
|
||||
img = cv2.imread(image_path)
|
||||
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
||||
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, **HOUGH_PARAMS)
|
||||
if circles is None:
|
||||
print("未检测到圆形区域")
|
||||
return []
|
||||
|
||||
# Convert and sort circles by y then x coordinate
|
||||
circles = np.round(circles[0]).astype(int)
|
||||
circles = sorted(circles, key=lambda c: (c[0], c[1])) # Sort by y then x
|
||||
|
||||
results = []
|
||||
for idx, circle in enumerate(circles):
|
||||
result = process_circle(idx, circle, img)
|
||||
results.append(result)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def match_template(rois, refs, thresh):
|
||||
results = []
|
||||
for idx, roi, _, _ in rois:
|
||||
best, max_val = "Unknown", -1.0
|
||||
roi_f = roi.astype(np.float32) / 255.0
|
||||
for name, ref in refs.items():
|
||||
ref_f = ref.astype(np.float32) / 255.0
|
||||
if roi_f.shape != ref_f.shape:
|
||||
continue
|
||||
res = cv2.matchTemplate(roi_f, ref_f, cv2.TM_CCOEFF_NORMED)
|
||||
val = float(res.max())
|
||||
if val > max_val:
|
||||
best, max_val = name, val
|
||||
if max_val >= thresh:
|
||||
results.append((idx, best, max_val))
|
||||
else:
|
||||
results.append((idx, None, max_val))
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def match_ssim(rois, refs, thresh):
|
||||
results = []
|
||||
|
||||
for idx, roi, _, _ in rois:
|
||||
best_match = "Unknown"
|
||||
max_combined_score = -1.0
|
||||
best_ssim = 0
|
||||
best_hist = 0
|
||||
best_edge = 0
|
||||
|
||||
# 预处理ROI
|
||||
roi_gray = roi if len(roi.shape) == 2 else cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
|
||||
roi_edges = cv2.Canny(roi_gray, 50, 150)
|
||||
roi_hist = cv2.calcHist([roi_gray], [0], None, [256], [0, 256])
|
||||
cv2.normalize(roi_hist, roi_hist, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX)
|
||||
|
||||
for name, ref in refs.items():
|
||||
if roi_gray.shape != ref.shape:
|
||||
continue
|
||||
|
||||
# 1. 计算SSIM相似度
|
||||
ssim_score, _ = ssim(roi_gray, ref, full=True)
|
||||
|
||||
# 2. 计算直方图相似度
|
||||
ref_hist = cv2.calcHist([ref], [0], None, [256], [0, 256])
|
||||
cv2.normalize(
|
||||
ref_hist, ref_hist, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX
|
||||
)
|
||||
hist_score = cv2.compareHist(roi_hist, ref_hist, cv2.HISTCMP_CORREL)
|
||||
|
||||
# 3. 计算边缘相似度
|
||||
ref_edges = cv2.Canny(ref, 50, 150)
|
||||
edge_intersection = np.sum(roi_edges * ref_edges)
|
||||
edge_union = np.sum(roi_edges + ref_edges)
|
||||
edge_score = edge_intersection / edge_union if edge_union > 0 else 0
|
||||
|
||||
# 加权综合评分 (可调整权重)
|
||||
combined_score = 0.6 * ssim_score + 0.2 * hist_score + 0.2 * edge_score
|
||||
|
||||
if combined_score > max_combined_score:
|
||||
best_match = name
|
||||
max_combined_score = combined_score
|
||||
best_ssim = ssim_score
|
||||
best_hist = hist_score
|
||||
best_edge = edge_score
|
||||
|
||||
# 动态阈值调整 (基于图像复杂度)
|
||||
roi_complexity = np.std(roi_gray) / 255.0
|
||||
dynamic_thresh = thresh * (1 + 0.3 * roi_complexity)
|
||||
|
||||
if max_combined_score >= dynamic_thresh:
|
||||
results.append(
|
||||
(idx, best_match, max_combined_score, best_ssim, best_hist, best_edge)
|
||||
)
|
||||
else:
|
||||
results.append(
|
||||
(idx, None, max_combined_score, best_ssim, best_hist, best_edge)
|
||||
)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def display_matches(rois, template_results, ssim_results, refs):
|
||||
ROW_LIMIT = 9
|
||||
blank_ref = next(iter(refs.values())) # 取一个参考图的尺寸
|
||||
blank_img = Image.new(
|
||||
"RGB", (blank_ref.shape[1], blank_ref.shape[0]), (200, 200, 200)
|
||||
) # 灰色占位图
|
||||
|
||||
combined_images = []
|
||||
row_images = []
|
||||
|
||||
row_width = 0
|
||||
max_height = 0
|
||||
|
||||
for item in rois:
|
||||
idx, _, _, color_path = item
|
||||
# Load color ROI image
|
||||
roi_img = Image.open(color_path).convert("RGB")
|
||||
|
||||
# Template matching result
|
||||
t_res = next((name for i, name, val in template_results if i == idx), None)
|
||||
t_val = next((val for i, name, val in template_results if i == idx), 0)
|
||||
if t_res is not None:
|
||||
t_ref_img = Image.fromarray(refs[t_res]).convert("RGB")
|
||||
else:
|
||||
t_ref_img = blank_img.copy()
|
||||
|
||||
# SSIM result - we need to get the detailed scores
|
||||
s_res = next((name for i, name, val in ssim_results if i == idx), None)
|
||||
s_val = next((val for i, name, val in ssim_results if i == idx), 0)
|
||||
if s_res is not None:
|
||||
s_ref_img = Image.fromarray(refs[s_res]).convert("RGB")
|
||||
else:
|
||||
s_ref_img = blank_img.copy()
|
||||
|
||||
# Combine Template Matching result (left) and SSIM result (right)
|
||||
combined_width = roi_img.width + t_ref_img.width + s_ref_img.width
|
||||
combined_height = max(roi_img.height, t_ref_img.height, s_ref_img.height)
|
||||
|
||||
combined = Image.new("RGB", (combined_width, combined_height), (255, 255, 255))
|
||||
combined.paste(roi_img, (0, 0))
|
||||
combined.paste(t_ref_img, (roi_img.width, 0))
|
||||
combined.paste(s_ref_img, (roi_img.width + t_ref_img.width, 0))
|
||||
|
||||
draw = ImageDraw.Draw(combined)
|
||||
font = ImageFont.truetype("msyh.ttc", 20)
|
||||
|
||||
# Get the detailed scores from the SSIM matching results
|
||||
ssim_details = next(
|
||||
(details for i, details in enumerate(ssim_results) if i == idx),
|
||||
(idx, None, 0, 0, 0, 0), # Default values if not found
|
||||
)
|
||||
best_ssim = ssim_details[3] if len(ssim_details) > 3 else 0
|
||||
best_hist = ssim_details[4] if len(ssim_details) > 4 else 0
|
||||
best_edge = ssim_details[5] if len(ssim_details) > 5 else 0
|
||||
|
||||
label = (
|
||||
f"ROI {idx} {best_ssim:.3f}{best_hist:.3f} {best_edge:.3f}\n"
|
||||
f"T({t_res if t_res else 'None'}, {t_val:.3f})\n"
|
||||
f"S({s_res if s_res else 'None'}, {s_val:.3f})"
|
||||
)
|
||||
|
||||
if t_res == s_res:
|
||||
draw.text(
|
||||
(roi_img.width, t_ref_img.height), label, fill=(255, 0, 0), font=font
|
||||
)
|
||||
else:
|
||||
draw.text(
|
||||
(roi_img.width, t_ref_img.height), label, fill=(255, 0, 255), font=font
|
||||
)
|
||||
combined = ImageOps.expand(combined, border=2, fill=(0, 0, 0))
|
||||
row_images.append(combined)
|
||||
row_width += combined_width
|
||||
max_height = max(max_height, combined_height)
|
||||
|
||||
if len(row_images) == ROW_LIMIT or idx == len(rois) - 1:
|
||||
row_img = Image.new("RGB", (row_width, max_height), (255, 255, 255))
|
||||
x_offset = 0
|
||||
for img in row_images:
|
||||
row_img.paste(img, (x_offset, 0))
|
||||
x_offset += img.width
|
||||
combined_images.append(row_img)
|
||||
row_images = []
|
||||
row_width = 0
|
||||
max_height = 0
|
||||
|
||||
total_width = max(img.width for img in combined_images)
|
||||
total_height = sum(img.height for img in combined_images)
|
||||
|
||||
final_img = Image.new("RGB", (total_width, total_height), (255, 255, 255))
|
||||
y_offset = 0
|
||||
for img in combined_images:
|
||||
final_img.paste(img, (0, y_offset))
|
||||
y_offset += img.height
|
||||
|
||||
final_img.save("depot_test/output/matches_all.png")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
refs = load_references(REF_TABLE_JSON, ICON_DIR)
|
||||
rois = detect_and_crop(IMG_PATH)
|
||||
|
||||
now = datetime.now()
|
||||
|
||||
Template_MATCH_THRESHOLD = 0.2
|
||||
print("\n=== 使用 Template Matching ===")
|
||||
results_template = match_template(rois, refs, Template_MATCH_THRESHOLD)
|
||||
print("\n模版匹配耗时:", datetime.now() - now)
|
||||
SSIM_MATCH_THRESHOLD = 0.05
|
||||
|
||||
now = datetime.now()
|
||||
print("\n=== 使用 SSIM ===")
|
||||
results_ssim = match_ssim(rois, refs, SSIM_MATCH_THRESHOLD)
|
||||
print("\nSSIM匹配耗时:", datetime.now() - now)
|
||||
|
||||
print("\n=== 结果对比 ===")
|
||||
for idx, _, _, _ in rois:
|
||||
# Template matching results
|
||||
t_res = next((name for i, name, val in results_template if i == idx), None)
|
||||
t_val = next((val for i, name, val in results_template if i == idx), 0)
|
||||
|
||||
# SSIM results - now includes detailed metrics
|
||||
s_res = next(
|
||||
(name for i, name, val, ssim, hist, edge in results_ssim if i == idx), None
|
||||
)
|
||||
s_val = next(
|
||||
(val for i, name, val, ssim, hist, edge in results_ssim if i == idx), 0
|
||||
)
|
||||
s_ssim = next(
|
||||
(ssim for i, name, val, ssim, hist, edge in results_ssim if i == idx), 0
|
||||
)
|
||||
s_hist = next(
|
||||
(hist for i, name, val, ssim, hist, edge in results_ssim if i == idx), 0
|
||||
)
|
||||
s_edge = next(
|
||||
(edge for i, name, val, ssim, hist, edge in results_ssim if i == idx), 0
|
||||
)
|
||||
|
||||
print(
|
||||
f"ROI {idx}:\n"
|
||||
f" Template=({t_res if t_res else 'None'}, {t_val:.3f})\n"
|
||||
f" SSIM=({s_res if s_res else 'None'}, {s_val:.3f})\n"
|
||||
f" Details: SSIM={s_ssim:.3f}, Hist={s_hist:.3f}, Edge={s_edge:.3f}"
|
||||
)
|
||||
|
||||
# Displaying matches side by side using the updated function
|
||||
display_matches(rois, results_template, results_ssim, refs)
|
|
@@ -1,192 +0,0 @@ (deleted file)
import json
|
||||
import os
|
||||
from datetime import datetime
|
||||
from multiprocessing import Pool
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from PIL import Image, ImageDraw, ImageFont, ImageOps
|
||||
|
||||
now = datetime.now()
|
||||
# 配置路径
|
||||
REF_DIR = r"depot_test\output/test/origin"
|
||||
ROI_DIR = r"depot_test\output/test/result"
|
||||
IMG_PATH = r"depot_test\stitched_image_multi.png"
|
||||
REF_TABLE_JSON = "./ArknightsGameData/zh_CN/gamedata/excel/item_table.json"
|
||||
ICON_DIR = "./ArknightsResource/items/"
|
||||
|
||||
# SSIM阈值
|
||||
MATCH_THRESHOLD = 0.01
|
||||
|
||||
# 圆检测参数
|
||||
HOUGH_PARAMS = dict(
|
||||
dp=5, minDist=230, param1=50, param2=30, minRadius=90, maxRadius=100
|
||||
)
|
||||
CROP_SIZE = 130
|
||||
BORDER = 26
|
||||
SECONDARY_SLICE = (slice(30, 140), slice(50, 160))
|
||||
|
||||
|
||||
def load_references(table_json, icon_dir):
|
||||
data = json.load(open(table_json, encoding="utf-8"))
|
||||
refs = {}
|
||||
size = CROP_SIZE * 2 - BORDER * 2
|
||||
for item in data.get("items", {}).values():
|
||||
t = item.get("classifyType")
|
||||
if t not in {"NORMAL", "CONSUME", "MATERIAL"}:
|
||||
continue
|
||||
path = os.path.join(icon_dir, f"{item['iconId']}.png")
|
||||
if not os.path.exists(path):
|
||||
continue
|
||||
im = Image.open(path).resize((size, size)).crop((50, 30, 160, 140)).convert("L")
|
||||
refs[item["name"]] = np.array(im)
|
||||
print(f"已加载 {len(refs)} 个参考图,保存于 {REF_DIR}")
|
||||
return refs
|
||||
|
||||
|
||||
def process_circle(idx, circle, img, rois, size, dr):
|
||||
x, y, r = circle
|
||||
crop = img[
|
||||
max(0, y - CROP_SIZE) : min(img.shape[0], y + CROP_SIZE),
|
||||
max(0, x - CROP_SIZE) : min(img.shape[1], x + CROP_SIZE),
|
||||
]
|
||||
c = crop[BORDER:-BORDER, BORDER:-BORDER]
|
||||
sec = c[SECONDARY_SLICE[0], SECONDARY_SLICE[1]]
|
||||
gray_sec = cv2.cvtColor(sec, cv2.COLOR_BGR2GRAY)
|
||||
|
||||
# 保存 ROI 图像
|
||||
os.makedirs(ROI_DIR, exist_ok=True)
|
||||
roi_path = os.path.join(ROI_DIR, f"roi_{idx}.png")
|
||||
cv2.imwrite(roi_path, gray_sec)
|
||||
rois.append(gray_sec)
|
||||
return idx, gray_sec, roi_path
|
||||
|
||||
|
||||
def detect_and_crop(image_path):
|
||||
img = cv2.imread(image_path)
|
||||
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
|
||||
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, **HOUGH_PARAMS)
|
||||
if circles is None:
|
||||
print("未检测到圆形区域")
|
||||
return []
|
||||
circles = np.round(circles[0]).astype(int)
|
||||
rois = []
|
||||
size = CROP_SIZE * 2 - BORDER * 2
|
||||
dr = img.max() - img.min()
|
||||
|
||||
# 使用多进程加速
|
||||
with Pool() as pool:
|
||||
results = pool.starmap(
|
||||
process_circle,
|
||||
[(idx, circle, img, rois, size, dr) for idx, circle in enumerate(circles)],
|
||||
)
|
||||
|
||||
return [roi for idx, roi, path in results]
|
||||
|
||||
|
||||
def process_match(idx, roi, refs, thresh):
|
||||
best, max_val = "Unknown", -1.0
|
||||
# 模板匹配需要浮点图
|
||||
roi_f = roi.astype(np.float32) / 255.0
|
||||
for name, ref in refs.items():
|
||||
# ref 已经是灰度 NumPy 数组
|
||||
ref_f = ref.astype(np.float32) / 255.0
|
||||
if roi_f.shape != ref_f.shape:
|
||||
continue
|
||||
# 使用 TM_CCOEFF_NORMED,结果越接近 1 越匹配
|
||||
res = cv2.matchTemplate(roi_f, ref_f, cv2.TM_CCOEFF_NORMED)
|
||||
val = float(res.max())
|
||||
if val > max_val:
|
||||
best, max_val = name, val
|
||||
if max_val >= thresh:
|
||||
return idx, best, max_val
|
||||
return idx, None, max_val
|
||||
|
||||
|
||||
def match_ssim(rois, refs, thresh=MATCH_THRESHOLD):
|
||||
from multiprocessing import Pool
|
||||
|
||||
args = [(idx, roi, refs, thresh) for idx, roi in enumerate(rois)]
|
||||
with Pool(processes=5) as pool:
|
||||
results = pool.starmap(process_match, args)
|
||||
|
||||
stats = {}
|
||||
match_idx = {}
|
||||
for idx, name, score in results:
|
||||
if name:
|
||||
stats[name] = stats.get(name, 0) + 1
|
||||
match_idx[idx] = name
|
||||
print(f"ROI {idx} 匹配结果: {name if name else 'Unknown'} (SSIM={score:.3f})")
|
||||
return stats, match_idx
|
||||
|
||||
|
||||
def display_matches(rois, match_idx, refs):
|
||||
ROW_LIMIT = 10 # 每行最多10个
|
||||
blank_ref = next(iter(refs.values())) # 取一个参考图的尺寸
|
||||
blank_img = Image.new(
|
||||
"RGB", (blank_ref.shape[1], blank_ref.shape[0]), (200, 200, 200)
|
||||
) # 灰色占位图
|
||||
|
||||
combined_images = []
|
||||
row_images = []
|
||||
|
||||
row_width = 0
|
||||
max_height = 0
|
||||
|
||||
for idx in range(len(rois)):
|
||||
roi_img = Image.fromarray(rois[idx]).convert("RGB")
|
||||
ref_name = match_idx.get(idx)
|
||||
if ref_name:
|
||||
ref_img = Image.fromarray(refs[ref_name]).convert("RGB")
|
||||
else:
|
||||
ref_img = blank_img.copy()
|
||||
|
||||
combined_width = roi_img.width + ref_img.width
|
||||
combined_height = max(roi_img.height, ref_img.height)
|
||||
|
||||
combined = Image.new("RGB", (combined_width, combined_height), (255, 255, 255))
|
||||
combined.paste(roi_img, (0, 0))
|
||||
combined.paste(ref_img, (roi_img.width, 0))
|
||||
|
||||
draw = ImageDraw.Draw(combined)
|
||||
|
||||
font = ImageFont.truetype("msyh.ttc", 20)
|
||||
|
||||
label = f"ROI {idx}: {ref_name if ref_name else 'Unknown'}"
|
||||
draw.text((5, 5), label, fill=(255, 0, 0), font=font)
|
||||
combined = ImageOps.expand(combined, border=2, fill=(0, 0, 0))
|
||||
row_images.append(combined)
|
||||
row_width += combined_width
|
||||
max_height = max(max_height, combined_height)
|
||||
|
||||
if len(row_images) == ROW_LIMIT or idx == len(rois) - 1:
|
||||
row_img = Image.new("RGB", (row_width, max_height), (255, 255, 255))
|
||||
x_offset = 0
|
||||
for img in row_images:
|
||||
row_img.paste(img, (x_offset, 0))
|
||||
x_offset += img.width
|
||||
combined_images.append(row_img)
|
||||
row_images = []
|
||||
row_width = 0
|
||||
max_height = 0
|
||||
|
||||
total_width = max(img.width for img in combined_images)
|
||||
total_height = sum(img.height for img in combined_images)
|
||||
|
||||
final_img = Image.new("RGB", (total_width, total_height), (255, 255, 255))
|
||||
y_offset = 0
|
||||
for img in combined_images:
|
||||
final_img.paste(img, (0, y_offset))
|
||||
y_offset += img.height
|
||||
|
||||
final_img.save("all_matches.png") # 或 final_img.save("all_matches.png")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
refs = load_references(REF_TABLE_JSON, ICON_DIR)
|
||||
rois = detect_and_crop(IMG_PATH)
|
||||
res, match_idx = match_ssim(rois, refs)
|
||||
|
||||
print("最终识别结果:", res)
|
||||
display_matches(rois, match_idx, refs)
|
||||
print(datetime.now() - now)
|
130 depot_test/训练.py (Normal file)
@@ -0,0 +1,130 @@
import os
|
||||
import random
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.functional as F
|
||||
import torchvision.transforms as transforms
|
||||
from PIL import Image
|
||||
from torch.utils.data import DataLoader, Dataset
|
||||
from tqdm import tqdm
|
||||
|
||||
|
||||
# 1. 网络定义
|
||||
class SiameseNetwork(nn.Module):
|
||||
def __init__(self):
|
||||
super(SiameseNetwork, self).__init__()
|
||||
self.cnn = nn.Sequential(
|
||||
nn.Conv2d(3, 32, kernel_size=5), # (3,110,110) -> (32,106,106)
|
||||
nn.ReLU(),
|
||||
nn.MaxPool2d(2), # -> (32,53,53)
|
||||
nn.Conv2d(32, 64, kernel_size=5), # -> (64,49,49)
|
||||
nn.ReLU(),
|
||||
nn.MaxPool2d(2), # -> (64,24,24)
|
||||
)
|
||||
self.fc = nn.Sequential(
|
||||
nn.Linear(64 * 24 * 24, 512), nn.ReLU(), nn.Linear(512, 128)
|
||||
)
|
||||
|
||||
def forward_once(self, x):
|
||||
x = self.cnn(x)
|
||||
x = x.view(x.size(0), -1)
|
||||
return self.fc(x)
|
||||
|
||||
def forward(self, input1, input2):
|
||||
output1 = self.forward_once(input1)
|
||||
output2 = self.forward_once(input2)
|
||||
return output1, output2
|
||||
|
||||
|
||||
# 2. Contrastive Loss
|
||||
class ContrastiveLoss(nn.Module):
|
||||
def __init__(self, margin=2.0):
|
||||
super(ContrastiveLoss, self).__init__()
|
||||
self.margin = margin
|
||||
|
||||
def forward(self, out1, out2, label):
|
||||
dist = F.pairwise_distance(out1, out2)
|
||||
loss = label * torch.pow(dist, 2) + (1 - label) * torch.pow(
|
||||
torch.clamp(self.margin - dist, min=0.0), 2
|
||||
)
|
||||
return loss.mean()
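In equation form, the loss above is the standard contrastive loss: with embedding distance d = ||f(x1) - f(x2)||_2 and label y = 1 for a same-class pair (0 otherwise),

    L(y, d) = y * d^2 + (1 - y) * max(margin - d, 0)^2

averaged over the batch, so matching pairs are pulled together while non-matching pairs are pushed at least margin apart.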
|
||||
|
||||
|
||||
# 3. Dataset 构造器
|
||||
class SiameseDataset(Dataset):
|
||||
def __init__(self, folder_path, transform=None):
|
||||
self.folder_path = folder_path
|
||||
self.classes = os.listdir(folder_path)
|
||||
self.transform = transform or transforms.ToTensor()
|
||||
|
||||
def __getitem__(self, index):
|
||||
class1 = random.choice(self.classes)
|
||||
class2 = (
|
||||
class1
|
||||
if random.random() < 0.5
|
||||
else random.choice([c for c in self.classes if c != class1])
|
||||
)
|
||||
|
||||
img1_path = os.path.join(
|
||||
self.folder_path,
|
||||
class1,
|
||||
random.choice(os.listdir(os.path.join(self.folder_path, class1))),
|
||||
)
|
||||
img2_path = os.path.join(
|
||||
self.folder_path,
|
||||
class2,
|
||||
random.choice(os.listdir(os.path.join(self.folder_path, class2))),
|
||||
)
|
||||
|
||||
img1 = Image.open(img1_path).convert("RGB").resize((110, 110))
|
||||
img2 = Image.open(img2_path).convert("RGB").resize((110, 110))
|
||||
|
||||
if self.transform:
|
||||
img1 = self.transform(img1)
|
||||
img2 = self.transform(img2)
|
||||
|
||||
label = 1.0 if class1 == class2 else 0.0
|
||||
return img1, img2, torch.tensor([label], dtype=torch.float32)
|
||||
|
||||
def __len__(self):
|
||||
return 5000
|
||||
|
||||
|
||||
# 4. 训练入口
|
||||
def train():
|
||||
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
||||
print(device)
|
||||
transform = transforms.Compose(
|
||||
[
|
||||
transforms.ToTensor(),
|
||||
]
|
||||
)
|
||||
|
||||
dataset = SiameseDataset("dataset/train", transform=transform)
|
||||
dataloader = DataLoader(dataset, batch_size=128, shuffle=True)
|
||||
|
||||
model = SiameseNetwork().to(device)
|
||||
criterion = ContrastiveLoss()
|
||||
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
|
||||
|
||||
for epoch in range(100):
|
||||
total_loss = 0
|
||||
for img1, img2, label in tqdm(dataloader, desc=f"Epoch {epoch + 1}"):
|
||||
img1, img2, label = img1.to(device), img2.to(device), label.to(device)
|
||||
|
||||
out1, out2 = model(img1, img2)
|
||||
loss = criterion(out1, out2, label)
|
||||
optimizer.zero_grad()
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
total_loss += loss.item()
|
||||
|
||||
print(f"Epoch {epoch + 1}, Loss: {total_loss / len(dataloader):.4f}")
|
||||
|
||||
torch.save(model.state_dict(), "siamese_model.pth")
|
||||
print("Model saved as siamese_model.pth")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
train()
|
82 depot_test/预测.py (Normal file)
@@ -0,0 +1,82 @@
import os
|
||||
|
||||
import torch
|
||||
import torch.nn.functional as F
|
||||
from PIL import Image
|
||||
from torchvision import transforms
|
||||
from 训练 import SiameseNetwork # 确保和训练脚本在同一目录
|
||||
|
||||
|
||||
# 加载模型
|
||||
def load_model(model_path, device):
|
||||
model = SiameseNetwork().to(device)
|
||||
model.load_state_dict(torch.load(model_path, map_location=device))
|
||||
model.eval()
|
||||
return model
|
||||
|
||||
|
||||
# 读取训练集中每个类别的图像(作为支持集)
|
||||
def load_train_class_images(train_dir, transform, device):
|
||||
class_images = {}
|
||||
for class_name in os.listdir(train_dir):
|
||||
class_path = os.path.join(train_dir, class_name)
|
||||
if not os.path.isdir(class_path):
|
||||
continue
|
||||
images = []
|
||||
for img_name in os.listdir(class_path):
|
||||
img_path = os.path.join(class_path, img_name)
|
||||
image = Image.open(img_path).convert("RGB").resize((110, 110))
|
||||
image = transform(image).unsqueeze(0).to(device) # shape: (1,3,110,110)
|
||||
images.append(image)
|
||||
if images:
|
||||
class_images[class_name] = images
|
||||
return class_images
|
||||
|
||||
|
||||
# 推理函数:返回最相似的类别名
|
||||
def predict(model, test_img, class_images):
|
||||
min_dist = float("inf")
|
||||
predicted_class = None
|
||||
|
||||
with torch.no_grad():
|
||||
for class_name, ref_images in class_images.items():
|
||||
for ref_img in ref_images:
|
||||
out1, out2 = model(test_img, ref_img)
|
||||
dist = F.pairwise_distance(out1, out2)
|
||||
if dist.item() < min_dist:
|
||||
min_dist = dist.item()
|
||||
predicted_class = class_name
|
||||
return predicted_class, min_dist
|
||||
|
||||
|
||||
# 主推理流程
|
||||
def infer():
|
||||
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
||||
model = load_model("siamese_model.pth", device)
|
||||
|
||||
transform = transforms.Compose(
|
||||
[
|
||||
transforms.ToTensor(),
|
||||
]
|
||||
)
|
||||
|
||||
train_dir = "dataset/train"
|
||||
test_dir = "dataset/test"
|
||||
class_images = load_train_class_images(train_dir, transform, device)
|
||||
|
||||
print("开始测试...")
|
||||
for class_name in os.listdir(test_dir):
|
||||
class_path = os.path.join(test_dir, class_name)
|
||||
for img_name in os.listdir(class_path):
|
||||
img_path = os.path.join(class_path, img_name)
|
||||
img = Image.open(img_path).convert("RGB").resize((110, 110))
|
||||
img_tensor = transform(img).unsqueeze(0).to(device)
|
||||
|
||||
predicted_class, dist = predict(model, img_tensor, class_images)
|
||||
print(
|
||||
f"Test Image: {img_name} | True: {class_name} | Predicted: {predicted_class} | Distance: {dist:.4f}"
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
infer()
|
BIN mower/resources/navigation/score/timeline/主题曲/15.png (Stored with Git LFS, Normal file); binary file not shown.
BIN mower/resources/skill_confirm.png (Stored with Git LFS); binary file not shown.
|
@@ -32,7 +32,7 @@ class Translation(Base):
|
|||
__tablename__ = "translations"
|
||||
itemId = Column(String, ForeignKey("items.itemId"), primary_key=True)
|
||||
iconId = Column(String)
|
||||
name = Column(String)
|
||||
name = Column(String, index=True) # Added index for faster lookups by name
|
||||
classifyType = Column(String)
|
||||
sortId = Column(Integer)
|
||||
category = Column(String, default="K未分类")
|
||||
|
@@ -158,12 +158,12 @@ class DepotManager:
|
|||
|
||||
def insert_translations(self):
|
||||
with self.Session() as session:
|
||||
for itemId, translation in key_mapping.items():
|
||||
for itemId, translation_data in key_mapping.items():
|
||||
iconId, name, classifyType, sortId = (
|
||||
translation[1],
|
||||
translation[2],
|
||||
translation[3],
|
||||
translation[4],
|
||||
translation_data[1],
|
||||
translation_data[2],
|
||||
translation_data[3],
|
||||
translation_data[4],
|
||||
)
|
||||
translation_obj = (
|
||||
session.query(Translation).filter_by(itemId=itemId).first()
|
||||
|
@@ -178,9 +178,14 @@ class DepotManager:
|
|||
category="K未分类",
|
||||
)
|
||||
session.add(translation_obj)
|
||||
# Ensure existing items also have category updated if they were 'K未分类'
|
||||
# and are now in the sort list.
|
||||
# However, the main update loop below handles this.
|
||||
|
||||
for category, items in self.sort.items():
|
||||
for name in items:
|
||||
for category, items_in_category in self.sort.items():
|
||||
for name in items_in_category:
|
||||
# Update category for items found by name.
|
||||
# This also handles items that might have been initially 'K未分类'.
|
||||
session.query(Translation).filter_by(name=name).update(
|
||||
{Translation.category: category}
|
||||
)
|
||||
|
@@ -348,6 +353,87 @@ class DepotManager:
|
|||
self.read_time,
|
||||
]
|
||||
|
||||
def get_item_count(self, item_name: str) -> int:
|
||||
"""
|
||||
Retrieves the latest recorded count for a single item by its translated name.
|
||||
|
||||
This function first looks up the itemId corresponding to the given item_name
|
||||
in the Translation table. Then, it finds the most recent time entry (timestamp)
|
||||
for that itemId in the Count table. If multiple entries exist for this exact
|
||||
latest time (e.g., due to different import types 'SK'/'CV' or multiple merges for
|
||||
the same time), it prioritizes the one that was inserted into the database
|
||||
last (i.e., has the highest primary key 'id').
|
||||
|
||||
Args:
|
||||
item_name: The translated name of the item (e.g., "合成玉").
|
||||
|
||||
Returns:
|
||||
The latest count of the item as an integer. Returns 0 if the item name
|
||||
is not found in translations, if the item has no count entries,
|
||||
if the count value is None, or if the count value cannot be
|
||||
converted to an integer.
|
||||
"""
|
||||
|
||||
with self.Session() as session:
|
||||
# 1. Find itemId from item_name
|
||||
translation_entry = (
|
||||
session.query(Translation.itemId)
|
||||
.filter(Translation.name == item_name)
|
||||
.first() # Returns a RowProxy (tuple-like) e.g., ('some_item_id',), or None
|
||||
)
|
||||
|
||||
if not translation_entry:
|
||||
logger.debug(
|
||||
f"Item name '{item_name}' not found in translations. Cannot retrieve count."
|
||||
)
|
||||
return 0
|
||||
|
||||
actual_item_id = translation_entry[
|
||||
0
|
||||
] # Get the itemId string from the tuple
|
||||
|
||||
# 2. Find the latest time recorded for this specific actual_item_id.
|
||||
# Count.time is stored as Text (stringified timestamps), func.max works lexicographically.
|
||||
latest_time_for_item = (
|
||||
session.query(func.max(Count.time))
|
||||
.filter(Count.itemId == actual_item_id)
|
||||
.scalar()
|
||||
)
|
||||
|
||||
if latest_time_for_item is None:
|
||||
# No count entries found for this actual_item_id.
|
||||
# This is a valid case: item exists in translations but has no count data yet.
|
||||
return 0
|
||||
|
||||
# 3. Fetch the Count.count value for the item at its latest recorded time.
|
||||
# Order by Count.id descending to pick the most recent insertion
|
||||
# if multiple records share the same itemId and latest_time_for_item.
|
||||
count_record_tuple = (
|
||||
session.query(Count.count) # Selects only the count column
|
||||
.filter(
|
||||
Count.itemId == actual_item_id, Count.time == latest_time_for_item
|
||||
)
|
||||
.order_by(Count.id.desc())
|
||||
.first() # Returns a RowProxy (tuple-like) e.g., ('123',), or None if no match.
|
||||
)
|
||||
|
||||
if count_record_tuple and count_record_tuple[0] is not None:
|
||||
# count_record_tuple[0] is the value from the Count.count column.
|
||||
try:
|
||||
return int(count_record_tuple[0])
|
||||
except ValueError:
|
||||
logger.error(
|
||||
f"Could not convert count '{count_record_tuple[0]}' to int for item '{item_name}' (itemId: '{actual_item_id}') "
|
||||
f"at time '{latest_time_for_item}'."
|
||||
)
|
||||
return 0 # Return 0 if count value is not a valid integer string.
|
||||
else:
|
||||
# This block is reached if:
|
||||
# 1. count_record_tuple is None (no row found, which is unlikely if latest_time_for_item was found,
|
||||
# unless there's a race condition or DB inconsistency).
|
||||
# 2. count_record_tuple[0] is None (the Count.count column had a NULL value for the latest entry).
|
||||
return 0
|
||||
|
||||
def close_engine(self):
|
||||
self.engine.dispose()
|
||||
|
||||
|
|
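A minimal usage sketch of the new helper; the import path is taken from the server.py hunk further down, and the item name is simply the example from the docstring:

from mower.solvers.depot_reader import DepotManager

manager = DepotManager()
try:
    # Latest recorded depot count for the item, or 0 if the name is unknown,
    # has no count rows yet, or the stored value is not a valid integer.
    jade = manager.get_item_count("合成玉")
    print(f"合成玉: {jade}")
finally:
    manager.close_engine()  # dispose of the SQLAlchemy engine when done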
22
mower/solvers/infra/train/train_agent_choose.py
Normal file
@@ -0,0 +1,22 @@
from mower.solvers.infra.base_mixin import BaseMixin
from mower.utils.solver import BaseSolver


class TrainAgentChooseSolver(BaseSolver, BaseMixin):
    solver_name = "切换训练位干员"

    def run(self, agent: str = ""):
        self.agent = agent
        super().run()

    # def transition(self) -> bool:
    #     if (scene := self.scene()) == Scene.INFRA_DETAILS:
    #         if pos := self.find("arrange_check_in"):
    #             self.ctap(pos, 3)
    #             return
    #         current = GetAgentFromRoomSolver()[1]["agent"]


    #
    #     else:
    #         EnterRoomSolver().run("train")
@@ -363,6 +363,33 @@ location = {
        "H14-4": [(7890, 262), "dark"],
        "14-23": [(8368, -1), "light"],
    },
    15: {
        "15-1": [(0, 0), "light"],
        "15-2": [[334, 116], "dark"],
        "15-3": [[668, -52], "light"],
        "15-4": [[1036, 135], "light"],
        "15-5": [[1503, 0], "light"],
        "15-6": [[1769, -115], "light"],
        "15-7": [[1960, 84], "light"],
        "15-8": [[2287, 17], "light"],
        "15-9": [[2630, 17], "light"],
        "15-10": [[2960, 17], "dark"],
        "15-11": [[3236, -115], "light"],
        "15-12": [[3438, 74], "light"],
        "15-13": [[3870, -89], "light"],
        "15-14": [[4451, -9], "dark"],
        "15-15": [[4781, -88], "light"],
        "15-16": [[5057, 56], "light"],
        "15-17": [[5414, 147], "light"],
        "15-18": [[5807, 41], "light"],
        "15-19": [[6096, -133], "dark"],
        "15-20": [[6487, -133], "dark"],
        "15-21": [[7802, -133], "light"],
        "H15-1": [(6644, -391), "dark"],
        "H15-2": [(7313, -392), "dark"],
        "H15-3": [(6917, 67), "dark"],
        "H15-4": [(7588, 68), "dark"],
    },
    "OF": {
        "OF-1": [(0, 0), "light"],
        "OF-2": [(738, 144), "light"],
@@ -484,7 +511,7 @@ class NavigationSolver(BaseSolver):
        self.prefix = prefix
        if prefix in location and self.name in location[prefix]:
            logger.info(f"主线关卡导航:{name}")
            if prefix < 4:
            if prefix < 4 or prefix == 15:
                act = 0
            elif prefix < 9:
                act = 1
@@ -28,6 +28,7 @@ class Score(BaseSolver):
            "12": [7014, -23],
            "13": [7544, -64],
            "14": [8199, -54],
            "15": [8923, 11],
        },
        "方舟": {},
        "那被祝福的": {},
@@ -16,7 +16,6 @@ from mower.utils.solver import BaseSolver
from mower.utils.vector import overlap, sa, va

drop_digits = [generate_name(str(i), font_size=28, style="dark") for i in range(10)]
confirm = (1635, 865)
default_duration = timedelta(minutes=3)


@@ -152,6 +151,13 @@ class OperationSolver(BaseSolver):
                break
        return i, scope

    def medicine_confirm(self, reason: str):
        result = self.ctap((1635, 865), 3)
        if result:
            self.auto_repeat = True
            logger.info(reason)
        return result

    def transition(self):
        if (scene := self.scene()) == Scene.OPERATOR_BEFORE:
            if self.check_timeout():
@@ -202,8 +208,7 @@ class OperationSolver(BaseSolver):
                if config.conf.use_all_medicine:
                    if self.check_timeout():
                        return True
                    if self.ctap(confirm, 3):
                        logger.info("自动使用全部理智药")
                    self.medicine_confirm("自动使用全部理智药")
                    return
                use_medicine = False
                # 先看设置是否吃药
@@ -222,8 +227,7 @@ class OperationSolver(BaseSolver):
                if use_medicine:
                    if self.check_timeout():
                        return True
                    if self.ctap(confirm, 3):
                        logger.info("使用即将过期的理智药")
                    self.medicine_confirm("使用即将过期的理智药")
                    return
                self.sanity_drain = True
                return True
@@ -237,8 +241,7 @@ class OperationSolver(BaseSolver):
                        logger.error("至纯源石不足")
                        self.sanity_drain = True
                        return True
                    if self.ctap(confirm):
                        logger.info("使用至纯源石恢复理智")
                    if self.medicine_confirm("使用至纯源石恢复理智"):
                        config.conf.originite -= 1
                        config.save_conf()
        elif scene == Scene.OPERATOR_ELIMINATE:
@@ -309,6 +309,14 @@ class Recognizer:
            self.scene = Scene.AGREEMENT_UPDATE
        elif self.find("notice"):
            self.scene = Scene.NOTICE
        elif self.find("skill_collect_confirm"):
            self.scene = Scene.TRAIN_FINISH
        elif self.find("training_support"):
            self.scene = Scene.TRAIN_SKILL_SELECT
        elif self.find("upgrade_failure"):
            self.scene = Scene.TRAIN_SKILL_UPGRADE_ERROR
        elif self.find("skill_confirm"):
            self.scene = Scene.TRAIN_SKILL_UPGRADE
        elif self.find("sss/main"):
            self.scene = Scene.SSS_MAIN
        elif self.find("sss/start") or self.find("sss/start_ex"):
@@ -716,38 +724,6 @@ class Recognizer:
            self.check_loading_time()
        return self.scene

    def get_train_scene(self) -> int:
        """
        训练室场景识别
        """
        # 场景缓存
        if self.scene != Scene.UNDEFINED:
            return self.scene
        # 连接中,优先级最高
        if self.find("connecting"):
            self.scene = Scene.CONNECTING
        elif self.find("infra_overview"):
            self.scene = Scene.INFRA_MAIN
        elif self.find("train_main"):
            self.scene = Scene.TRAIN_MAIN
        elif self.find("skill_collect_confirm", scope=((1142, 831), (1282, 932))):
            self.scene = Scene.TRAIN_FINISH
        elif self.find("training_support"):
            self.scene = Scene.TRAIN_SKILL_SELECT
        elif self.find("upgrade_failure"):
            self.scene = Scene.TRAIN_SKILL_UPGRADE_ERROR
        elif self.find("skill_confirm"):
            self.scene = Scene.TRAIN_SKILL_UPGRADE
        else:
            self.scene = Scene.UNKNOWN
        self.check_current_focus()

        logger.debug(f"Scene: {self.scene}: {SceneComment[self.scene]}")

        self.check_loading_time()

        return self.scene

    def find(
        self,
        res: tp.Res,
@@ -205,6 +205,7 @@ color = {
    "sign_in/special_access/banner": (391, 493),
    "sign_in/spring_festival/collect": (781, 953),
    "sign_in/spring_festival/receive": (834, 859),
    "skill_confirm": (1688, 899),
    "skip": (1803, 32),
    "sss/EC_up": (1049, 490),
    "sss/action": (1641, 967),
@@ -238,6 +239,7 @@ color = {
    "sss/switch_to_ex": (1255, 942),
    "sss/switch_to_normal": (1255, 934),
    "start_story": (1392, 623),
    "training_support": (116, 542),
    "vector/action_complete": (1280, 627),
    "vector/down": (152, 608),
    "vector/entry": (1479, 600),
@@ -487,6 +489,7 @@ template_matching = {
    "sign_in/spring_festival/login_day": ((1270, 160), (1460, 980)),
    "sign_in/task/banner": (645, 27),
    "sign_in/task/entry": (1682, 186),
    "skill_collect_confirm": (1164, 837),
    "sss/abandon": ((0, 504), (289, 564)),
    "sss/accomplished": (640, 381),
    "sss/drop/CASTER": ((0, 745), (1920, 808)),
@@ -514,6 +517,7 @@ template_matching = {
    "terminal/regular": (1083, 988),
    "terminal/score": (443, 988),
    "upgrade": (997, 501),
    "upgrade_failure": ((1419, 104), (1920, 235)),
    "user": (50, 452),
    "user_on": (51, 450),
    "visit_limit": ((1550, 100), (1920, 170)),
@@ -275,6 +275,7 @@ Res = Literal[
    "navigation/score/timeline/主题曲/12",
    "navigation/score/timeline/主题曲/13",
    "navigation/score/timeline/主题曲/14",
    "navigation/score/timeline/主题曲/15",
    "navigation/score/timeline/主题曲/2",
    "navigation/score/timeline/主题曲/3",
    "navigation/score/timeline/主题曲/4",
13
server.py
@@ -257,8 +257,10 @@ def scene_list():

@app.route("/activity")
def activity():
    from mower.solvers.depot_reader import DepotManager
    from mower.solvers.navigation.activity import ActivityNavigation

    a = DepotManager()
    location = ActivityNavigation.location["normal"]
    if isinstance(location, dict):
        stages = list(location.keys())
@@ -292,7 +294,16 @@ def activity():
            end = item["end"]
        except Exception:
            begin, end = None, None
        result.append({"name": item["id"], "drop": drop, "begin": begin, "end": end})
        count = a.get_item_count(item["drop"]["常规掉落"][1])
        result.append(
            {
                "name": item["id"],
                "drop": drop,
                "begin": begin,
                "end": end,
                "count": count,
            }
        )

    return result
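For illustration, a client-side sketch of the extended /activity payload; the host and port are assumptions, and only the added "count" field is taken from the hunk above:

import requests

# Each activity stage entry now also carries the current depot count of its
# regular drop, looked up through DepotManager.get_item_count().
resp = requests.get("http://127.0.0.1:58000/activity")  # host/port are hypothetical
for stage in resp.json():
    print(stage["name"], stage["begin"], stage["end"], stage["count"])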
2
ui/dist/assets/DebugConsole.js
vendored
|
@ -1,6 +1,6 @@
|
|||
import { IosArrowBack, IosArrowForward } from "./IosArrowForward.js";
|
||||
import { Scrollbar } from "./Scrollbar.js";
|
||||
import { derived, composite, c, cB, insideModal, insidePopover, cM, cE, defineComponent, h, useConfig, useRtl, useTheme, provide, toRef, computed, useThemeClass, createInjectionKey, inject, throwError, resolveSlot, ref, watchEffect, useMergedState, on, depx, off, call, onMounted, watch, createBlock, withCtx, openBlock, createVNode, createElementBlock, Fragment, renderList, unref, createTextVNode, toDisplayString, createBaseVNode, createCommentVNode, isRef, Button } from "./index.js";
|
||||
import { derived, composite, c, cB, insideModal, insidePopover, cM, cE, defineComponent, h, useConfig, useRtl, useTheme, provide, toRef, computed, useThemeClass, createInjectionKey, inject, throwError, resolveSlot, ref, watchEffect, useMergedState, on, depx, call, off, onMounted, watch, createBlock, withCtx, openBlock, createVNode, createElementBlock, Fragment, renderList, unref, createTextVNode, toDisplayString, createBaseVNode, createCommentVNode, isRef, Button } from "./index.js";
|
||||
import { __unplugin_components_2 } from "./Flex.js";
|
||||
import { __unplugin_components_4, __unplugin_components_9 } from "./Image.js";
|
||||
import { __unplugin_components_6 } from "./Slider.js";
|
||||
|
|
6
ui/dist/assets/DesktopSettings.css
vendored
|
@ -8,10 +8,10 @@
|
|||
}
|
||||
|
||||
/* Styles 部分保持不变 */
|
||||
.float-btn[data-v-249c7f74] {
|
||||
.float-btn[data-v-0185b520] {
|
||||
z-index: 9999;
|
||||
}
|
||||
.settings-panel[data-v-249c7f74] {
|
||||
.settings-panel[data-v-0185b520] {
|
||||
width: 850px;
|
||||
height: calc(min(100vh - 70px, 630px));
|
||||
background-color: #f8f8f8;
|
||||
|
@ -19,7 +19,7 @@
|
|||
box-shadow: 0 2px 8px rgba(0, 0, 0, 0.15);
|
||||
overflow: hidden;
|
||||
}
|
||||
.settings-content-area[data-v-249c7f74] {
|
||||
.settings-content-area[data-v-0185b520] {
|
||||
height: 100%;
|
||||
overflow-y: auto;
|
||||
background-color: #fff;
|
||||
|
|
2
ui/dist/assets/DesktopSettings.js
vendored
|
@ -1325,7 +1325,7 @@ const _sfc_main = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const DesktopSettings = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-249c7f74"]]);
|
||||
const DesktopSettings = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-0185b520"]]);
|
||||
export {
|
||||
DesktopSettings as default
|
||||
};
|
||||
|
|
2
ui/dist/assets/Doc.css
vendored
|
@ -1,5 +1,5 @@
|
|||
|
||||
.loading[data-v-f9ae5a23] {
|
||||
.loading[data-v-69c55212] {
|
||||
position: absolute;
|
||||
top: 48px;
|
||||
font-size: 28px;
|
||||
|
|
2
ui/dist/assets/Doc.js
vendored
|
@ -46,7 +46,7 @@ const _sfc_main = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const Doc = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-f9ae5a23"]]);
|
||||
const Doc = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-69c55212"]]);
|
||||
export {
|
||||
Doc as default
|
||||
};
|
||||
|
|
22
ui/dist/assets/DropDown.css
vendored
|
@ -1,39 +1,39 @@
|
|||
.button_row[data-v-c3862e91] {
|
||||
.button_row[data-v-9e1304bf] {
|
||||
margin-top: 8px;
|
||||
}
|
||||
.task_row[data-v-c3862e91] {
|
||||
.task_row[data-v-9e1304bf] {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 8px;
|
||||
width: 100%;
|
||||
}
|
||||
.task_row .n-input[data-v-c3862e91] {
|
||||
.task_row .n-input[data-v-9e1304bf] {
|
||||
width: 140px;
|
||||
}
|
||||
.outer[data-v-c3862e91] {
|
||||
.outer[data-v-9e1304bf] {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
align-items: center;
|
||||
gap: 18px;
|
||||
}
|
||||
.inner[data-v-c3862e91] {
|
||||
.inner[data-v-9e1304bf] {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 8px;
|
||||
}
|
||||
.task-col[data-v-c3862e91] {
|
||||
.task-col[data-v-9e1304bf] {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
gap: 8px;
|
||||
align-items: center;
|
||||
}
|
||||
.n-dynamic-tags[data-v-c3862e91] {
|
||||
.n-dynamic-tags[data-v-9e1304bf] {
|
||||
align-items: center;
|
||||
}
|
||||
.ml[data-v-c3862e91] {
|
||||
.ml[data-v-9e1304bf] {
|
||||
margin-left: 16px;
|
||||
}
|
||||
.dropdown[data-v-c4c4a151] {
|
||||
padding-left: var(--179f97e8);
|
||||
padding-right: var(--179f97e8);
|
||||
.dropdown[data-v-01fc4d7e] {
|
||||
padding-left: var(--5dbb34be);
|
||||
padding-right: var(--5dbb34be);
|
||||
}
|
||||
|
|
6
ui/dist/assets/DropDown2.js
vendored
|
@ -10343,7 +10343,7 @@ const _sfc_main$1 = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const __unplugin_components_1 = /* @__PURE__ */ _export_sfc(_sfc_main$1, [["__scopeId", "data-v-c3862e91"]]);
|
||||
const __unplugin_components_1 = /* @__PURE__ */ _export_sfc(_sfc_main$1, [["__scopeId", "data-v-9e1304bf"]]);
|
||||
var MdArrowDropup = {};
|
||||
var hasRequiredMdArrowDropup;
|
||||
function requireMdArrowDropup() {
|
||||
|
@ -10440,7 +10440,7 @@ const _sfc_main = {
|
|||
},
|
||||
setup(__props) {
|
||||
useCssVars((_ctx) => ({
|
||||
"179f97e8": unref(btn_pad)
|
||||
"5dbb34be": unref(btn_pad)
|
||||
}));
|
||||
const mobile = inject("mobile");
|
||||
const btn_pad = computed(() => {
|
||||
|
@ -10490,7 +10490,7 @@ const _sfc_main = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const __unplugin_components_8 = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-c4c4a151"]]);
|
||||
const __unplugin_components_8 = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-01fc4d7e"]]);
|
||||
export {
|
||||
__unplugin_components_1,
|
||||
__unplugin_components_4,
|
||||
|
|
2
ui/dist/assets/HelpText.css
vendored
|
@ -1,4 +1,4 @@
|
|||
|
||||
.help[data-v-e18814d8] {
|
||||
.help[data-v-587b8b4b] {
|
||||
z-index: 100;
|
||||
}
|
||||
|
|
2
ui/dist/assets/HelpText.js
vendored
|
@ -53,7 +53,7 @@ function _sfc_render(_ctx, _cache) {
|
|||
_: 3
|
||||
});
|
||||
}
|
||||
const __unplugin_components_0 = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render], ["__scopeId", "data-v-e18814d8"]]);
|
||||
const __unplugin_components_0 = /* @__PURE__ */ _export_sfc(_sfc_main, [["render", _sfc_render], ["__scopeId", "data-v-587b8b4b"]]);
|
||||
export {
|
||||
__unplugin_components_0
|
||||
};
|
||||
|
|
24
ui/dist/assets/Log.css
vendored
|
@ -1,38 +1,38 @@
|
|||
.log[data-v-f995835a] {
|
||||
.log[data-v-af2dccc2] {
|
||||
overflow: hidden;
|
||||
flex: 1;
|
||||
}
|
||||
.task-table[data-v-f995835a] {
|
||||
.task-table[data-v-af2dccc2] {
|
||||
position: relative;
|
||||
max-width: 600px;
|
||||
}
|
||||
.task-table th[data-v-f995835a] {
|
||||
.task-table th[data-v-af2dccc2] {
|
||||
padding: 2px 16px;
|
||||
}
|
||||
.task-table td[data-v-f995835a] {
|
||||
.task-table td[data-v-af2dccc2] {
|
||||
height: 24px;
|
||||
padding: 2px 8px;
|
||||
}
|
||||
.task-table td[data-v-f995835a]:last-child {
|
||||
.task-table td[data-v-af2dccc2]:last-child {
|
||||
width: 100%;
|
||||
}
|
||||
.action-container[data-v-f995835a] {
|
||||
.action-container[data-v-af2dccc2] {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 12px;
|
||||
z-index: 15;
|
||||
}
|
||||
.toggle-table-collapse-btn[data-v-f995835a] {
|
||||
.toggle-table-collapse-btn[data-v-af2dccc2] {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
right: 0;
|
||||
}
|
||||
.toggle-fullscreen-btn[data-v-f995835a] {
|
||||
.toggle-fullscreen-btn[data-v-af2dccc2] {
|
||||
position: absolute;
|
||||
top: 0;
|
||||
right: 38px;
|
||||
}
|
||||
.log-bg[data-v-f995835a] {
|
||||
.log-bg[data-v-af2dccc2] {
|
||||
content: "";
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
|
@ -42,12 +42,12 @@
|
|||
opacity: 0.25;
|
||||
background-image: url(/bg1.webp), url(/bg2.webp);
|
||||
background-repeat: no-repeat;
|
||||
background-size: var(--77097c62);
|
||||
background-position: var(--aed33812);
|
||||
background-size: var(--054a8042);
|
||||
background-position: var(--4a685a07);
|
||||
pointer-events: none;
|
||||
z-index: 14;
|
||||
}
|
||||
.sc[data-v-f995835a] {
|
||||
.sc[data-v-af2dccc2] {
|
||||
max-width: 480px;
|
||||
max-height: 270px;
|
||||
border-radius: 6px;
|
||||
|
|
6
ui/dist/assets/Log.js
vendored
|
@ -829,8 +829,8 @@ const _sfc_main = {
|
|||
__name: "Log",
|
||||
setup(__props) {
|
||||
useCssVars((_ctx) => ({
|
||||
"77097c62": unref(bg_size),
|
||||
"aed33812": unref(bg_position)
|
||||
"054a8042": unref(bg_size),
|
||||
"4a685a07": unref(bg_position)
|
||||
}));
|
||||
const mower_store = useMowerStore();
|
||||
const { ws, log, log_mobile, running, task_list, waiting, get_task_id, sc_uri } = storeToRefs(mower_store);
|
||||
|
@ -1061,7 +1061,7 @@ const _sfc_main = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const Log = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-f995835a"]]);
|
||||
const Log = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-af2dccc2"]]);
|
||||
export {
|
||||
Log as default
|
||||
};
|
||||
|
|
168
ui/dist/assets/LongTasks.css
vendored
|
@ -1,44 +1,44 @@
|
|||
@charset "UTF-8";
|
||||
.coord-label[data-v-bd41e82d] {
|
||||
.coord-label[data-v-f4ed7511] {
|
||||
width: 40px;
|
||||
padding-left: 8px;
|
||||
}
|
||||
.card-title[data-v-bd41e82d] {
|
||||
.card-title[data-v-f4ed7511] {
|
||||
margin-right: 8px;
|
||||
}
|
||||
|
||||
.threshold[data-v-9ffc8196] {
|
||||
.threshold[data-v-17303244] {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 14px;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.email-title[data-v-f611d9fe] {
|
||||
.email-title[data-v-3b2776a3] {
|
||||
width: 100%;
|
||||
}
|
||||
.expand[data-v-f611d9fe] {
|
||||
.expand[data-v-3b2776a3] {
|
||||
flex-grow: 1;
|
||||
}
|
||||
.email-table[data-v-f611d9fe] {
|
||||
.email-table[data-v-3b2776a3] {
|
||||
width: 100%;
|
||||
margin-bottom: 12px;
|
||||
}
|
||||
.email-test[data-v-f611d9fe] {
|
||||
.email-test[data-v-3b2776a3] {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 16px;
|
||||
}
|
||||
.email-mode[data-v-f611d9fe] {
|
||||
.email-mode[data-v-3b2776a3] {
|
||||
margin-left: 20px;
|
||||
}
|
||||
.email-label[data-v-f611d9fe] {
|
||||
.email-label[data-v-3b2776a3] {
|
||||
width: 68px;
|
||||
}
|
||||
p[data-v-f611d9fe] {
|
||||
p[data-v-3b2776a3] {
|
||||
margin: 0 0 10px 0;
|
||||
}
|
||||
.mt-16[data-v-f611d9fe] {
|
||||
.mt-16[data-v-3b2776a3] {
|
||||
margin-top: 16px;
|
||||
}
|
||||
|
||||
|
@ -49,122 +49,122 @@ p[data-v-f611d9fe] {
|
|||
gap: 12px;
|
||||
}
|
||||
|
||||
.subtitle[data-v-e24fdfe6] {
|
||||
.subtitle[data-v-4f64e716] {
|
||||
margin: 12px 0 6px;
|
||||
}
|
||||
|
||||
.scale[data-v-60093aec] {
|
||||
.scale[data-v-26ad4730] {
|
||||
width: 60px;
|
||||
text-align: right;
|
||||
}
|
||||
.scale-apply[data-v-60093aec] {
|
||||
.scale-apply[data-v-26ad4730] {
|
||||
margin-left: 24px;
|
||||
}
|
||||
.waiting-table {
|
||||
th,
|
||||
td {
|
||||
&[data-v-60093aec] {
|
||||
&[data-v-26ad4730] {
|
||||
padding: 4px;
|
||||
min-width: 70px;
|
||||
width: 100px;
|
||||
}
|
||||
&[data-v-60093aec]:first-child {
|
||||
&[data-v-26ad4730]:first-child {
|
||||
width: auto;
|
||||
padding: 4px 8px;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.item[data-v-2ceb271c] {
|
||||
.item[data-v-9889efe8] {
|
||||
font-weight: 500;
|
||||
font-size: 16px;
|
||||
}
|
||||
.n-divider[data-v-2ceb271c]:not(.n-divider--vertical) {
|
||||
.n-divider[data-v-9889efe8]:not(.n-divider--vertical) {
|
||||
margin: 6px 0;
|
||||
}
|
||||
.telemetry[data-v-2ceb271c] {
|
||||
.telemetry[data-v-9889efe8] {
|
||||
border-collapse: collapse;
|
||||
table-layout: fixed;
|
||||
margin-bottom: 6px;
|
||||
}
|
||||
th[data-v-2ceb271c],
|
||||
td[data-v-2ceb271c] {
|
||||
th[data-v-9889efe8],
|
||||
td[data-v-9889efe8] {
|
||||
border: 1px solid white;
|
||||
padding: 2px 6px;
|
||||
}
|
||||
p[data-v-e83a44f6] {
|
||||
p[data-v-46384f04] {
|
||||
margin: 0 0 8px 0;
|
||||
}
|
||||
h4[data-v-e83a44f6] {
|
||||
h4[data-v-46384f04] {
|
||||
margin: 12px 0 10px 0;
|
||||
}
|
||||
.big-table[data-v-e83a44f6] {
|
||||
.big-table[data-v-46384f04] {
|
||||
margin-top: 10px;
|
||||
max-width: 320px;
|
||||
}
|
||||
.big-table th[data-v-e83a44f6] {
|
||||
.big-table th[data-v-46384f04] {
|
||||
text-align: center;
|
||||
}
|
||||
.big-table tr[data-v-e83a44f6] {
|
||||
.big-table tr[data-v-46384f04] {
|
||||
width: 70px;
|
||||
}
|
||||
.big-table td[data-v-e83a44f6] {
|
||||
.big-table td[data-v-46384f04] {
|
||||
height: 24px;
|
||||
}
|
||||
.big-table td[data-v-e83a44f6]:nth-child(1) {
|
||||
.big-table td[data-v-46384f04]:nth-child(1) {
|
||||
width: 70px;
|
||||
text-align: center;
|
||||
}
|
||||
.big-table td[data-v-e83a44f6]:nth-child(2) {
|
||||
.big-table td[data-v-46384f04]:nth-child(2) {
|
||||
width: 420px;
|
||||
}
|
||||
.final[data-v-e83a44f6] {
|
||||
.final[data-v-46384f04] {
|
||||
margin: 16px 0 0;
|
||||
}p[data-v-70be2e67] {
|
||||
}p[data-v-83ca4317] {
|
||||
margin: 2px 0;
|
||||
}
|
||||
h4[data-v-70be2e67] {
|
||||
h4[data-v-83ca4317] {
|
||||
margin: 12px 0 8px 0;
|
||||
}
|
||||
table[data-v-70be2e67] {
|
||||
table[data-v-83ca4317] {
|
||||
width: 100%;
|
||||
}
|
||||
td[data-v-70be2e67]:nth-child(1) {
|
||||
td[data-v-83ca4317]:nth-child(1) {
|
||||
width: 80px;
|
||||
}
|
||||
.ignore-blacklist[data-v-70be2e67] {
|
||||
.ignore-blacklist[data-v-83ca4317] {
|
||||
margin-bottom: 10px;
|
||||
display: flex;
|
||||
gap: 12px;
|
||||
}
|
||||
.h4[data-v-70be2e67] {
|
||||
.h4[data-v-83ca4317] {
|
||||
font-size: 16px;
|
||||
font-weight: 500;
|
||||
}
|
||||
.maa-shop[data-v-70be2e67] {
|
||||
.maa-shop[data-v-83ca4317] {
|
||||
margin: 8px 0;
|
||||
}
|
||||
.item[data-v-70be2e67] {
|
||||
.item[data-v-83ca4317] {
|
||||
font-weight: 500;
|
||||
font-size: 16px;
|
||||
}
|
||||
/* 表单和活动区域样式调整 (可选) */
|
||||
.form-item .n-flex[data-v-0897e500] {
|
||||
.form-item .n-flex[data-v-d720c1de] {
|
||||
gap: 8px 12px;
|
||||
}
|
||||
.activity .n-tag[data-v-0897e500] {
|
||||
.activity .n-tag[data-v-d720c1de] {
|
||||
margin-bottom: 4px;
|
||||
}
|
||||
|
||||
/* 周计划表格容器 */
|
||||
.plan-container[data-v-0897e500] {
|
||||
.plan-container[data-v-d720c1de] {
|
||||
border: 1px solid var(--n-border-color);
|
||||
border-radius: var(--n-border-radius);
|
||||
margin-top: 12px;
|
||||
}
|
||||
|
||||
/* 表头行样式 */
|
||||
.plan-header[data-v-0897e500] {
|
||||
.plan-header[data-v-d720c1de] {
|
||||
font-size: 12px;
|
||||
color: var(--n-text-color-2);
|
||||
padding: 6px 10px;
|
||||
|
@ -173,7 +173,7 @@ td[data-v-70be2e67]:nth-child(1) {
|
|||
}
|
||||
|
||||
/* 表头 - 关卡列 */
|
||||
.header-stage[data-v-0897e500] {
|
||||
.header-stage[data-v-d720c1de] {
|
||||
width: 120px; /* 固定宽度 */
|
||||
flex-shrink: 0;
|
||||
text-align: left;
|
||||
|
@ -182,22 +182,22 @@ td[data-v-70be2e67]:nth-child(1) {
|
|||
}
|
||||
|
||||
/* 表头 - 日期列 */
|
||||
.header-days[data-v-0897e500] {
|
||||
.header-days[data-v-d720c1de] {
|
||||
flex-grow: 1;
|
||||
text-align: center;
|
||||
min-width: 210px; /* 保证容纳7个按钮的宽度 */
|
||||
}
|
||||
.header-days .today-header[data-v-0897e500] {
|
||||
.header-days .today-header[data-v-d720c1de] {
|
||||
font-weight: normal;
|
||||
color: var(--n-text-color-1);
|
||||
}
|
||||
.header-days .today-name[data-v-0897e500] {
|
||||
.header-days .today-name[data-v-d720c1de] {
|
||||
font-weight: bold;
|
||||
color: var(--n-color-error); /* 高亮当天星期名称 */
|
||||
}
|
||||
|
||||
/* 表头 - 操作列 */
|
||||
.header-action[data-v-0897e500] {
|
||||
.header-action[data-v-d720c1de] {
|
||||
width: 40px; /* 固定宽度 */
|
||||
flex-shrink: 0;
|
||||
text-align: center;
|
||||
|
@ -206,19 +206,19 @@ td[data-v-70be2e67]:nth-child(1) {
|
|||
}
|
||||
|
||||
/* 计划项目行样式 */
|
||||
.plan-item[data-v-0897e500] {
|
||||
.plan-item[data-v-d720c1de] {
|
||||
padding: 6px 10px;
|
||||
border-bottom: 1px solid var(--n-border-color);
|
||||
}
|
||||
.plan-item[data-v-0897e500]:last-child {
|
||||
.plan-item[data-v-d720c1de]:last-child {
|
||||
border-bottom: none;
|
||||
}
|
||||
.plan-item[data-v-0897e500]:nth-child(even) {
|
||||
.plan-item[data-v-d720c1de]:nth-child(even) {
|
||||
background-color: var(--n-action-color); /* 斑马条纹 */
|
||||
}
|
||||
|
||||
/* 行内 - 关卡区域 */
|
||||
.stage-area[data-v-0897e500] {
|
||||
.stage-area[data-v-d720c1de] {
|
||||
width: 120px; /* 同表头宽度 */
|
||||
flex-shrink: 0;
|
||||
display: flex;
|
||||
|
@ -227,7 +227,7 @@ td[data-v-70be2e67]:nth-child(1) {
|
|||
text-overflow: ellipsis;
|
||||
white-space: nowrap;
|
||||
}
|
||||
.stage-area .stage-name[data-v-0897e500] {
|
||||
.stage-area .stage-name[data-v-d720c1de] {
|
||||
font-size: 14px;
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
|
@ -235,18 +235,18 @@ td[data-v-70be2e67]:nth-child(1) {
|
|||
padding-left: 4px;
|
||||
cursor: default; /* 显示 title 提示 */
|
||||
}
|
||||
.stage-area .stage-name[title][data-v-0897e500] {
|
||||
.stage-area .stage-name[title][data-v-d720c1de] {
|
||||
/* 确保 title 属性生效 */
|
||||
cursor: default;
|
||||
}
|
||||
.stage-area .custom-stage-input[data-v-0897e500],
|
||||
.stage-area .custom-stage-tag[data-v-0897e500] {
|
||||
.stage-area .custom-stage-input[data-v-d720c1de],
|
||||
.stage-area .custom-stage-tag[data-v-d720c1de] {
|
||||
width: 100%;
|
||||
}
|
||||
.stage-area .custom-stage-tag[data-v-0897e500] {
|
||||
.stage-area .custom-stage-tag[data-v-d720c1de] {
|
||||
justify-content: space-between;
|
||||
}
|
||||
.stage-area .custom-stage-tag[data-v-0897e500] .n-tag__content {
|
||||
.stage-area .custom-stage-tag[data-v-d720c1de] .n-tag__content {
|
||||
/* 确保标签内容不溢出 */
|
||||
overflow: hidden;
|
||||
text-overflow: ellipsis;
|
||||
|
@ -255,14 +255,14 @@ td[data-v-70be2e67]:nth-child(1) {
|
|||
}
|
||||
|
||||
/* 行内 - 每日切换按钮区域 */
|
||||
.day-toggles[data-v-0897e500] {
|
||||
.day-toggles[data-v-d720c1de] {
|
||||
flex-grow: 1;
|
||||
justify-content: space-around; /* 均匀分布按钮 */
|
||||
min-width: 210px; /* 同表头估算宽度 */
|
||||
/* 每个按钮的占位容器 */
|
||||
/* 每日切换按钮样式 */
|
||||
}
|
||||
.day-toggles .day-toggle-placeholder[data-v-0897e500] {
|
||||
.day-toggles .day-toggle-placeholder[data-v-d720c1de] {
|
||||
width: 28px; /* 固定宽度 */
|
||||
height: 28px; /* 固定高度 */
|
||||
display: flex;
|
||||
|
@ -270,7 +270,7 @@ td[data-v-70be2e67]:nth-child(1) {
|
|||
align-items: center;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
.day-toggles .day-toggle[data-v-0897e500] {
|
||||
.day-toggles .day-toggle[data-v-d720c1de] {
|
||||
width: 28px;
|
||||
height: 28px;
|
||||
padding: 0;
|
||||
|
@ -283,7 +283,7 @@ td[data-v-70be2e67]:nth-child(1) {
|
|||
/* 未选中按钮 (ghost) active 状态 */
|
||||
/* 选中按钮 (success) active 状态 */
|
||||
}
|
||||
.day-toggles .day-toggle.today-button[data-v-0897e500] {
|
||||
.day-toggles .day-toggle.today-button[data-v-d720c1de] {
|
||||
font-weight: bold; /* !!! 新增:当天按钮字体加粗 !!! */
|
||||
/* 当天且未选中的按钮,边框高亮 */
|
||||
/* 当天且未选中的按钮 (ghost 状态),文字颜色也高亮 */
|
||||
|
@ -291,56 +291,56 @@ td[data-v-70be2e67]:nth-child(1) {
|
|||
/* 当天且选中的按钮 (success 状态),添加外边框 */
|
||||
/* 当天且未选中的按钮 (ghost 状态) active */
|
||||
}
|
||||
.day-toggles .day-toggle.today-button[data-v-0897e500]:not(.n-button--success) {
|
||||
.day-toggles .day-toggle.today-button[data-v-d720c1de]:not(.n-button--success) {
|
||||
border-color: var(--n-color-info);
|
||||
}
|
||||
.day-toggles .day-toggle.today-button.n-button--ghost[data-v-0897e500]:not(:disabled) {
|
||||
.day-toggles .day-toggle.today-button.n-button--ghost[data-v-d720c1de]:not(:disabled) {
|
||||
color: var(--n-color-info);
|
||||
}
|
||||
.day-toggles .day-toggle.today-button.n-button--ghost[data-v-0897e500]:not(:disabled):hover {
|
||||
.day-toggles .day-toggle.today-button.n-button--ghost[data-v-d720c1de]:not(:disabled):hover {
|
||||
border-color: var(--n-color-info-hover);
|
||||
color: var(--n-color-info-hover);
|
||||
}
|
||||
.day-toggles .day-toggle.today-button.n-button--success[data-v-0897e500] {
|
||||
.day-toggles .day-toggle.today-button.n-button--success[data-v-d720c1de] {
|
||||
outline: 1px solid var(--n-color-info-hover);
|
||||
outline-offset: 1px;
|
||||
}
|
||||
.day-toggles .day-toggle.today-button.n-button--ghost[data-v-0897e500]:not(:disabled):active {
|
||||
.day-toggles .day-toggle.today-button.n-button--ghost[data-v-d720c1de]:not(:disabled):active {
|
||||
background-color: var(--n-color-info-pressed);
|
||||
color: #fff;
|
||||
border-color: var(--n-color-info-pressed);
|
||||
}
|
||||
.day-toggles .day-toggle.n-button--ghost[data-v-0897e500]:not(:disabled) {
|
||||
.day-toggles .day-toggle.n-button--ghost[data-v-d720c1de]:not(:disabled) {
|
||||
color: var(--n-text-color-3); /* 稍暗的颜色 */
|
||||
border-color: var(--n-border-color); /* 标准边框 */
|
||||
}
|
||||
.day-toggles .day-toggle.n-button--ghost[data-v-0897e500]:not(:disabled):hover {
|
||||
.day-toggles .day-toggle.n-button--ghost[data-v-d720c1de]:not(:disabled):hover {
|
||||
border-color: var(--n-color-primary-hover);
|
||||
color: var(--n-color-primary-hover);
|
||||
}
|
||||
.day-toggles .day-toggle.n-button--success[data-v-0897e500] {
|
||||
.day-toggles .day-toggle.n-button--success[data-v-d720c1de] {
|
||||
background-color: var(--n-color-success-suppl); /* 浅成功色背景 */
|
||||
color: var(--n-color-success-hover); /* 深成功色文字 */
|
||||
border-color: var(--n-color-success-hover); /* 成功色边框 */
|
||||
}
|
||||
.day-toggles .day-toggle.n-button--success[data-v-0897e500]:hover {
|
||||
.day-toggles .day-toggle.n-button--success[data-v-d720c1de]:hover {
|
||||
background-color: var(--n-color-success-hover);
|
||||
color: #fff;
|
||||
border-color: var(--n-color-success-hover);
|
||||
}
|
||||
.day-toggles .day-toggle.n-button--ghost[data-v-0897e500]:not(:disabled):active {
|
||||
.day-toggles .day-toggle.n-button--ghost[data-v-d720c1de]:not(:disabled):active {
|
||||
background-color: var(--n-color-primary-pressed);
|
||||
color: #fff;
|
||||
border-color: var(--n-color-primary-pressed);
|
||||
}
|
||||
.day-toggles .day-toggle.n-button--success[data-v-0897e500]:active {
|
||||
.day-toggles .day-toggle.n-button--success[data-v-d720c1de]:active {
|
||||
background-color: var(--n-color-success-pressed);
|
||||
color: #fff;
|
||||
border-color: var(--n-color-success-pressed);
|
||||
}
|
||||
|
||||
/* 行内 - 操作按钮区域 */
|
||||
.action-area[data-v-0897e500] {
|
||||
.action-area[data-v-d720c1de] {
|
||||
width: 40px; /* 同表头宽度 */
|
||||
flex-shrink: 0;
|
||||
text-align: right;
|
||||
|
@ -348,12 +348,12 @@ td[data-v-70be2e67]:nth-child(1) {
|
|||
justify-content: center;
|
||||
align-items: center;
|
||||
}
|
||||
.action-area .n-button[data-v-0897e500] {
|
||||
.action-area .n-button[data-v-d720c1de] {
|
||||
padding: 0 4px; /* 图标按钮减小内边距 */
|
||||
}
|
||||
|
||||
/* 活动标签进度条样式 (与原版一致) */
|
||||
.progress-tag[data-v-0897e500] {
|
||||
.progress-tag[data-v-d720c1de] {
|
||||
position: relative;
|
||||
padding-bottom: 5px;
|
||||
height: auto;
|
||||
|
@ -364,7 +364,7 @@ td[data-v-70be2e67]:nth-child(1) {
|
|||
align-items: center;
|
||||
overflow: hidden;
|
||||
}
|
||||
.progress[data-v-0897e500] {
|
||||
.progress[data-v-d720c1de] {
|
||||
position: absolute;
|
||||
bottom: 0;
|
||||
left: 0;
|
||||
|
@ -372,7 +372,7 @@ td[data-v-70be2e67]:nth-child(1) {
|
|||
background-color: var(--n-color-success);
|
||||
transition: width 0.3s ease;
|
||||
}
|
||||
.progress-bar[data-v-0897e500] {
|
||||
.progress-bar[data-v-d720c1de] {
|
||||
position: absolute;
|
||||
bottom: 0;
|
||||
left: 0;
|
||||
|
@ -382,31 +382,31 @@ td[data-v-70be2e67]:nth-child(1) {
|
|||
}
|
||||
|
||||
/* 禁用状态标题样式 */
|
||||
.card-title.disabled[data-v-0897e500] {
|
||||
.card-title.disabled[data-v-d720c1de] {
|
||||
color: var(--n-text-color-disabled);
|
||||
}
|
||||
.sss-container[data-v-80ad8472] {
|
||||
.sss-container[data-v-a922d165] {
|
||||
display: flex;
|
||||
width: 100%;
|
||||
gap: 8px;
|
||||
}
|
||||
.wrapper[data-v-80ad8472] {
|
||||
.wrapper[data-v-a922d165] {
|
||||
white-space: pre-wrap;
|
||||
user-select: text;
|
||||
}
|
||||
.title[data-v-80ad8472] {
|
||||
.title[data-v-a922d165] {
|
||||
font-size: 18px;
|
||||
font-weight: 500;
|
||||
margin-bottom: 6px;
|
||||
}
|
||||
p[data-v-d479d5bf] {
|
||||
p[data-v-b2d35691] {
|
||||
margin: 0 0 10px 0;
|
||||
}
|
||||
.misc-container[data-v-d479d5bf] {
|
||||
.misc-container[data-v-b2d35691] {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 12px;
|
||||
}
|
||||
.header[data-v-d479d5bf] {
|
||||
.header[data-v-b2d35691] {
|
||||
margin: 12px 0;
|
||||
}
|
44
ui/dist/assets/LongTasks.js
vendored
|
@ -1821,7 +1821,7 @@ const _sfc_main$i = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const Device = /* @__PURE__ */ _export_sfc(_sfc_main$i, [["__scopeId", "data-v-bd41e82d"]]);
|
||||
const Device = /* @__PURE__ */ _export_sfc(_sfc_main$i, [["__scopeId", "data-v-f4ed7511"]]);
|
||||
const _hoisted_1$7 = { class: "threshold" };
|
||||
const _sfc_main$h = {
|
||||
__name: "RIIC",
|
||||
|
@ -2116,7 +2116,7 @@ const _sfc_main$h = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const RIIC = /* @__PURE__ */ _export_sfc(_sfc_main$h, [["__scopeId", "data-v-9ffc8196"]]);
|
||||
const RIIC = /* @__PURE__ */ _export_sfc(_sfc_main$h, [["__scopeId", "data-v-17303244"]]);
|
||||
const _hoisted_1$6 = { key: 0 };
|
||||
const _hoisted_2$5 = { key: 1 };
|
||||
const _hoisted_3$2 = { key: 0 };
|
||||
|
@ -2371,7 +2371,7 @@ const _sfc_main$g = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const Email = /* @__PURE__ */ _export_sfc(_sfc_main$g, [["__scopeId", "data-v-f611d9fe"]]);
|
||||
const Email = /* @__PURE__ */ _export_sfc(_sfc_main$g, [["__scopeId", "data-v-3b2776a3"]]);
|
||||
const _hoisted_1$5 = { style: { "display": "flex", "align-items": "center", "width": "100%" } };
|
||||
const _hoisted_2$4 = { class: "misc-container" };
|
||||
const _sfc_main$f = {
|
||||
|
@ -2603,7 +2603,7 @@ const _sfc_main$e = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const AutoFight = /* @__PURE__ */ _export_sfc(_sfc_main$e, [["__scopeId", "data-v-e24fdfe6"]]);
|
||||
const AutoFight = /* @__PURE__ */ _export_sfc(_sfc_main$e, [["__scopeId", "data-v-4f64e716"]]);
|
||||
const _sfc_main$d = {
|
||||
__name: "Appearance",
|
||||
setup(__props) {
|
||||
|
@ -2726,7 +2726,7 @@ const _sfc_main$d = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const Appearance = /* @__PURE__ */ _export_sfc(_sfc_main$d, [["__scopeId", "data-v-60093aec"]]);
|
||||
const Appearance = /* @__PURE__ */ _export_sfc(_sfc_main$d, [["__scopeId", "data-v-26ad4730"]]);
|
||||
const _sfc_main$c = {
|
||||
__name: "DailyMission",
|
||||
setup(__props) {
|
||||
|
@ -3043,7 +3043,7 @@ const _sfc_main$c = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const DailyMission = /* @__PURE__ */ _export_sfc(_sfc_main$c, [["__scopeId", "data-v-2ceb271c"]]);
|
||||
const DailyMission = /* @__PURE__ */ _export_sfc(_sfc_main$c, [["__scopeId", "data-v-9889efe8"]]);
|
||||
const _sfc_main$b = {
|
||||
__name: "Recruit",
|
||||
setup(__props) {
|
||||
|
@ -3196,7 +3196,7 @@ const _sfc_main$b = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const Recruit = /* @__PURE__ */ _export_sfc(_sfc_main$b, [["__scopeId", "data-v-e83a44f6"]]);
|
||||
const Recruit = /* @__PURE__ */ _export_sfc(_sfc_main$b, [["__scopeId", "data-v-46384f04"]]);
|
||||
const _hoisted_1$4 = { style: { "display": "flex", "align-items": "center", "width": "100%" } };
|
||||
const _hoisted_2$3 = { style: { "margin-right": "24px" } };
|
||||
const _sfc_main$a = {
|
||||
|
@ -3681,7 +3681,7 @@ const _sfc_main$8 = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const Clue = /* @__PURE__ */ _export_sfc(_sfc_main$8, [["__scopeId", "data-v-70be2e67"]]);
|
||||
const Clue = /* @__PURE__ */ _export_sfc(_sfc_main$8, [["__scopeId", "data-v-83ca4317"]]);
|
||||
class LuxonError extends Error {
|
||||
}
|
||||
class InvalidDateTimeError extends LuxonError {
|
||||
|
@ -10171,9 +10171,6 @@ const _sfc_main$7 = {
|
|||
const availabilityMap = /* @__PURE__ */ new Map();
|
||||
const allDaysAvailable = Array(7).fill(true);
|
||||
general_important_stages.forEach((stage) => availabilityMap.set(stage, allDaysAvailable));
|
||||
conf.value.custom_stages.forEach((stage) => {
|
||||
if (stage) availabilityMap.set(stage, allDaysAvailable);
|
||||
});
|
||||
general_unimportant_stages.forEach((stage) => {
|
||||
let availableDays = allDaysAvailable;
|
||||
for (const [prefix, days] of Object.entries(time_table)) {
|
||||
|
@ -10188,16 +10185,20 @@ const _sfc_main$7 = {
|
|||
}
|
||||
availabilityMap.set(stage, availableDays);
|
||||
});
|
||||
conf.value.custom_stages.forEach((stage) => {
|
||||
if (stage) {
|
||||
availabilityMap.set(stage, allDaysAvailable);
|
||||
}
|
||||
});
|
||||
return availabilityMap;
|
||||
});
|
||||
const plan = computed(() => {
|
||||
const result = [];
|
||||
const currentPlan = conf.value.weekly_plan;
|
||||
const availabilityMap = stageAvailability.value;
|
||||
const generateWeekAndState = (stage, isCustom) => {
|
||||
var _a, _b;
|
||||
const week = [];
|
||||
const available = availabilityMap.get(stage) ?? Array(7).fill(true);
|
||||
const available = stageAvailability.value.get(stage) ?? Array(7).fill(true);
|
||||
const planType = isCustom ? "custom" : "general";
|
||||
let availableCount = 0;
|
||||
let enabledCount = 0;
|
||||
|
@ -10233,12 +10234,8 @@ const _sfc_main$7 = {
|
|||
conf.value.custom_stages.forEach(
|
||||
(stage) => result.push({
|
||||
stage,
|
||||
...stage === null ? {
|
||||
week: Array(7).fill({ available: true, enable: false }),
|
||||
selectionState: "empty"
|
||||
// 空槽位默认状态为空
|
||||
} : generateWeekAndState(stage, true)
|
||||
// 否则正常生成
|
||||
...generateWeekAndState(stage, true)
|
||||
// 对所有自定义关卡(包括 null)调用 generateWeekAndState
|
||||
})
|
||||
);
|
||||
general_unimportant_stages.forEach(
|
||||
|
@ -10524,7 +10521,8 @@ const _sfc_main$7 = {
|
|||
createBaseVNode("div", null, " 剩余时间:" + toDisplayString(unref(Duration).fromObject(
|
||||
{ seconds: Math.max(0, a.end - Date.now() / 1e3) },
|
||||
{ locale: "zh-CN" }
|
||||
).shiftTo("days", "hours").toHuman()), 1)
|
||||
).shiftTo("days", "hours").toHuman()), 1),
|
||||
createBaseVNode("div", null, toDisplayString(a.drop) + " 已有:" + toDisplayString(a.count), 1)
|
||||
]),
|
||||
_: 2
|
||||
}, 1024);
|
||||
|
@ -10702,7 +10700,7 @@ const _sfc_main$7 = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const WeeklyPlan = /* @__PURE__ */ _export_sfc(_sfc_main$7, [["__scopeId", "data-v-0897e500"]]);
|
||||
const WeeklyPlan = /* @__PURE__ */ _export_sfc(_sfc_main$7, [["__scopeId", "data-v-d720c1de"]]);
|
||||
const _sfc_main$6 = {
|
||||
__name: "ReclamationAlgorithm",
|
||||
setup(__props) {
|
||||
|
@ -10987,7 +10985,7 @@ const _sfc_main$5 = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const __unplugin_components_4 = /* @__PURE__ */ _export_sfc(_sfc_main$5, [["__scopeId", "data-v-80ad8472"]]);
|
||||
const __unplugin_components_4 = /* @__PURE__ */ _export_sfc(_sfc_main$5, [["__scopeId", "data-v-a922d165"]]);
|
||||
const _hoisted_1 = { class: "misc-container" };
|
||||
const _sfc_main$4 = {
|
||||
__name: "MaaBasic",
|
||||
|
@ -11108,7 +11106,7 @@ const _sfc_main$4 = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const __unplugin_components_0 = /* @__PURE__ */ _export_sfc(_sfc_main$4, [["__scopeId", "data-v-d479d5bf"]]);
|
||||
const __unplugin_components_0 = /* @__PURE__ */ _export_sfc(_sfc_main$4, [["__scopeId", "data-v-b2d35691"]]);
|
||||
const _sfc_main$3 = {
|
||||
__name: "MaaRogue",
|
||||
setup(__props) {
|
||||
|
|
94
ui/dist/assets/Plan.css
vendored
|
@ -1,56 +1,56 @@
|
|||
.select-label[data-v-ef0a8ed8] {
|
||||
.select-label[data-v-5c30c73a] {
|
||||
width: 44px;
|
||||
}
|
||||
.type-select[data-v-ef0a8ed8] {
|
||||
.type-select[data-v-5c30c73a] {
|
||||
width: 100px;
|
||||
margin-right: 8px;
|
||||
}
|
||||
.product-select[data-v-ef0a8ed8] {
|
||||
.product-select[data-v-5c30c73a] {
|
||||
width: 180px;
|
||||
margin-right: 8px;
|
||||
}
|
||||
.operator-select[data-v-ef0a8ed8] {
|
||||
.operator-select[data-v-5c30c73a] {
|
||||
width: 220px;
|
||||
}
|
||||
.replacement-select[data-v-ef0a8ed8] {
|
||||
.replacement-select[data-v-5c30c73a] {
|
||||
min-width: 400px;
|
||||
}
|
||||
.plan-container[data-v-ef0a8ed8] {
|
||||
.plan-container[data-v-5c30c73a] {
|
||||
width: 980px;
|
||||
min-width: 980px;
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 12px;
|
||||
}
|
||||
.group[data-v-ef0a8ed8] {
|
||||
.group[data-v-5c30c73a] {
|
||||
width: 160px;
|
||||
}
|
||||
.facility-2[data-v-ef0a8ed8] {
|
||||
.facility-2[data-v-5c30c73a] {
|
||||
width: 124px;
|
||||
height: 76px;
|
||||
margin: 2px 3px;
|
||||
}
|
||||
.facility-3[data-v-ef0a8ed8] {
|
||||
.facility-3[data-v-5c30c73a] {
|
||||
width: 175px;
|
||||
height: 76px;
|
||||
margin: 2px 3px;
|
||||
}
|
||||
.facility-5[data-v-ef0a8ed8] {
|
||||
.facility-5[data-v-5c30c73a] {
|
||||
width: 277px;
|
||||
height: 76px;
|
||||
margin: 2px 3px;
|
||||
}
|
||||
.avatars[data-v-ef0a8ed8] {
|
||||
.avatars[data-v-5c30c73a] {
|
||||
display: flex;
|
||||
gap: 6px;
|
||||
z-index: 5;
|
||||
}
|
||||
.avatars img[data-v-ef0a8ed8] {
|
||||
.avatars img[data-v-5c30c73a] {
|
||||
box-sizing: content-box;
|
||||
border-radius: 2px;
|
||||
background: lightgray;
|
||||
}
|
||||
.facility-name[data-v-ef0a8ed8] {
|
||||
.facility-name[data-v-5c30c73a] {
|
||||
margin-bottom: 4px;
|
||||
text-align: center;
|
||||
line-height: 1;
|
||||
|
@ -58,83 +58,83 @@
|
|||
justify-content: space-around;
|
||||
z-index: 5;
|
||||
}
|
||||
.outer[data-v-ef0a8ed8] {
|
||||
.outer[data-v-5c30c73a] {
|
||||
display: flex;
|
||||
margin: 0 auto;
|
||||
}
|
||||
.left_box[data-v-ef0a8ed8] {
|
||||
.left_box[data-v-5c30c73a] {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 4px;
|
||||
padding-top: 82px;
|
||||
padding-right: 2px;
|
||||
}
|
||||
.left_box .left_contain[data-v-ef0a8ed8] {
|
||||
.left_box .left_contain[data-v-5c30c73a] {
|
||||
display: grid;
|
||||
grid-template-columns: 1fr 1fr 1fr;
|
||||
gap: 4px;
|
||||
}
|
||||
.left_box .left_contain > div[data-v-ef0a8ed8] {
|
||||
.left_box .left_contain > div[data-v-5c30c73a] {
|
||||
box-sizing: border-box;
|
||||
width: 175px;
|
||||
height: 76px;
|
||||
cursor: pointer;
|
||||
}
|
||||
.left_box .left_contain .info[data-v-ef0a8ed8] {
|
||||
.left_box .left_contain .info[data-v-5c30c73a] {
|
||||
background-color: rgba(32, 128, 240, 0.16);
|
||||
border-radius: 3px;
|
||||
border: 1px solid transparent;
|
||||
transition: all 0.3s;
|
||||
position: relative;
|
||||
}
|
||||
.left_box .left_contain .info[data-v-ef0a8ed8]:hover {
|
||||
.left_box .left_contain .info[data-v-5c30c73a]:hover {
|
||||
background-color: rgba(32, 128, 240, 0.22);
|
||||
}
|
||||
.left_box .left_contain .info.true[data-v-ef0a8ed8] {
|
||||
.left_box .left_contain .info.true[data-v-5c30c73a] {
|
||||
background-color: var(--n-color);
|
||||
border: 1px solid rgb(32, 128, 240);
|
||||
}
|
||||
.left_box .left_contain .info .facility-name[data-v-ef0a8ed8] {
|
||||
.left_box .left_contain .info .facility-name[data-v-5c30c73a] {
|
||||
color: #2080f0;
|
||||
}
|
||||
.left_box .left_contain .warning[data-v-ef0a8ed8] {
|
||||
.left_box .left_contain .warning[data-v-5c30c73a] {
|
||||
background-color: rgba(240, 160, 32, 0.16);
|
||||
border-radius: 3px;
|
||||
border: 1px solid transparent;
|
||||
transition: all 0.3s;
|
||||
position: relative;
|
||||
}
|
||||
.left_box .left_contain .warning[data-v-ef0a8ed8]:hover {
|
||||
.left_box .left_contain .warning[data-v-5c30c73a]:hover {
|
||||
background-color: rgba(240, 160, 32, 0.22);
|
||||
}
|
||||
.left_box .left_contain .warning.true[data-v-ef0a8ed8] {
|
||||
.left_box .left_contain .warning.true[data-v-5c30c73a] {
|
||||
background-color: var(--n-color);
|
||||
border: 1px solid rgb(240, 160, 32);
|
||||
}
|
||||
.left_box .left_contain .warning .facility-name[data-v-ef0a8ed8] {
|
||||
.left_box .left_contain .warning .facility-name[data-v-5c30c73a] {
|
||||
color: #f0a020;
|
||||
}
|
||||
.left_box .left_contain .primary[data-v-ef0a8ed8] {
|
||||
.left_box .left_contain .primary[data-v-5c30c73a] {
|
||||
background-color: rgba(24, 160, 88, 0.16);
|
||||
border-radius: 3px;
|
||||
border: 1px solid transparent;
|
||||
transition: all 0.3s;
|
||||
}
|
||||
.left_box .left_contain .primary[data-v-ef0a8ed8]:hover {
|
||||
.left_box .left_contain .primary[data-v-5c30c73a]:hover {
|
||||
background-color: rgba(24, 160, 88, 0.22);
|
||||
}
|
||||
.left_box .left_contain .primary.true[data-v-ef0a8ed8] {
|
||||
.left_box .left_contain .primary.true[data-v-5c30c73a] {
|
||||
background-color: var(--n-color);
|
||||
border: 1px solid rgb(24, 160, 88);
|
||||
}
|
||||
.left_box .left_contain .primary .facility-name[data-v-ef0a8ed8] {
|
||||
.left_box .left_contain .primary .facility-name[data-v-5c30c73a] {
|
||||
color: #18a058;
|
||||
}
|
||||
.mid_box[data-v-ef0a8ed8] {
|
||||
.mid_box[data-v-5c30c73a] {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
}
|
||||
.waiting[data-v-ef0a8ed8] {
|
||||
.waiting[data-v-5c30c73a] {
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
display: flex;
|
||||
|
@ -146,15 +146,15 @@
|
|||
cursor: pointer;
|
||||
border-radius: 3px;
|
||||
}
|
||||
.waiting[data-v-ef0a8ed8]:hover {
|
||||
.waiting[data-v-5c30c73a]:hover {
|
||||
opacity: 1;
|
||||
border: 1px dashed rgb(54, 173, 106);
|
||||
color: rgb(54, 173, 106);
|
||||
}
|
||||
.waiting div[data-v-ef0a8ed8] {
|
||||
.waiting div[data-v-5c30c73a] {
|
||||
text-align: center;
|
||||
}
|
||||
.draggable[data-v-ef0a8ed8] {
|
||||
.draggable[data-v-5c30c73a] {
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
display: flex;
|
||||
|
@ -162,7 +162,7 @@
|
|||
align-items: center;
|
||||
justify-content: center;
|
||||
}
|
||||
.product-bg[data-v-ef0a8ed8] {
|
||||
.product-bg[data-v-5c30c73a] {
|
||||
content: "";
|
||||
position: absolute;
|
||||
top: 0;
|
||||
|
@ -176,10 +176,10 @@
|
|||
z-index: 3;
|
||||
pointer-events: none;
|
||||
}
|
||||
.avatar-wrapper[data-v-ef0a8ed8] {
|
||||
.avatar-wrapper[data-v-5c30c73a] {
|
||||
position: relative;
|
||||
}
|
||||
.workaholic[data-v-ef0a8ed8] {
|
||||
.workaholic[data-v-5c30c73a] {
|
||||
position: absolute;
|
||||
content: "";
|
||||
top: 0;
|
||||
|
@ -193,14 +193,14 @@
|
|||
.n-base-selection-placeholder .n-avatar {
|
||||
display: none;
|
||||
}
|
||||
.n-table[data-v-ab1299ac] {
|
||||
.n-table[data-v-1ad93d71] {
|
||||
min-width: 100%;
|
||||
}
|
||||
.n-table th[data-v-ab1299ac] {
|
||||
.n-table th[data-v-1ad93d71] {
|
||||
width: 124px;
|
||||
box-sizing: border-box;
|
||||
}
|
||||
.label[data-v-ab1299ac] {
|
||||
.label[data-v-1ad93d71] {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
align-items: center;
|
||||
|
@ -218,26 +218,26 @@
|
|||
.dropdown-select {
|
||||
flex: 1;
|
||||
}
|
||||
.w-980[data-v-d06834f4] {
|
||||
.w-980[data-v-a3e17cad] {
|
||||
width: 100%;
|
||||
max-width: 980px;
|
||||
}
|
||||
.mx-auto[data-v-d06834f4] {
|
||||
.mx-auto[data-v-a3e17cad] {
|
||||
margin: 0 auto;
|
||||
}
|
||||
.mt-12[data-v-d06834f4] {
|
||||
.mt-12[data-v-a3e17cad] {
|
||||
margin-top: 12px;
|
||||
}
|
||||
.mb-12[data-v-d06834f4] {
|
||||
.mb-12[data-v-a3e17cad] {
|
||||
margin-bottom: 12px;
|
||||
}
|
||||
.px-12[data-v-d06834f4] {
|
||||
.px-12[data-v-a3e17cad] {
|
||||
padding: 0 12px;
|
||||
}
|
||||
.mw-980[data-v-d06834f4] {
|
||||
.mw-980[data-v-a3e17cad] {
|
||||
min-width: 980px;
|
||||
}
|
||||
.plan-bar[data-v-d06834f4] {
|
||||
.plan-bar[data-v-a3e17cad] {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
flex-grow: 0;
|
||||
|
|
6
ui/dist/assets/Plan.js
vendored
|
@ -1429,7 +1429,7 @@ const _sfc_main$4 = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const __unplugin_components_9 = /* @__PURE__ */ _export_sfc(_sfc_main$4, [["__scopeId", "data-v-ef0a8ed8"]]);
|
||||
const __unplugin_components_9 = /* @__PURE__ */ _export_sfc(_sfc_main$4, [["__scopeId", "data-v-5c30c73a"]]);
|
||||
const _sfc_main$3 = {
|
||||
__name: "TriggerString",
|
||||
props: ["data"],
|
||||
|
@ -1686,7 +1686,7 @@ const _sfc_main$2 = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const __unplugin_components_2 = /* @__PURE__ */ _export_sfc(_sfc_main$2, [["__scopeId", "data-v-ab1299ac"]]);
|
||||
const __unplugin_components_2 = /* @__PURE__ */ _export_sfc(_sfc_main$2, [["__scopeId", "data-v-1ad93d71"]]);
|
||||
const _hoisted_1$1 = { class: "dropdown-container" };
|
||||
const _hoisted_2 = { class: "dropdown-label" };
|
||||
const _sfc_main$1 = {
|
||||
|
@ -5326,7 +5326,7 @@ const _sfc_main = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const Plan = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-d06834f4"]]);
|
||||
const Plan = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-a3e17cad"]]);
|
||||
export {
|
||||
Plan as default
|
||||
};
|
||||
|
|
4
ui/dist/assets/RecordLine.css
vendored
|
@ -1,8 +1,8 @@
|
|||
|
||||
[data-v-b9b10beb] .n-modal-container {
|
||||
[data-v-b9d6a27e] .n-modal-container {
|
||||
top: 0 !important;
|
||||
left: 0 !important;
|
||||
}
|
||||
[data-v-b9b10beb] .n-card__content {
|
||||
[data-v-b9d6a27e] .n-card__content {
|
||||
height: calc(100% - 60px); /* 60px为header高度 */
|
||||
}
|
||||
|
|
2
ui/dist/assets/RecordLine.js
vendored
|
@ -169,7 +169,7 @@ const _sfc_main = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const RecordLine = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-b9b10beb"]]);
|
||||
const RecordLine = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-b9d6a27e"]]);
|
||||
export {
|
||||
RecordLine as default
|
||||
};
|
||||
|
|
2
ui/dist/assets/SlickOperatorSelect.css
vendored
|
@ -1,4 +1,4 @@
|
|||
|
||||
.width100[data-v-67be031c] {
|
||||
.width100[data-v-2ee926d4] {
|
||||
width: 100%;
|
||||
}
|
||||
|
|
2
ui/dist/assets/SlickOperatorSelect.js
vendored
|
@ -4754,7 +4754,7 @@ const _sfc_main = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const __unplugin_components_16 = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-67be031c"]]);
|
||||
const __unplugin_components_16 = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-2ee926d4"]]);
|
||||
export {
|
||||
__unplugin_components_14,
|
||||
__unplugin_components_16,
|
||||
|
|
24
ui/dist/assets/depot.css
vendored
|
@ -1,46 +1,46 @@
|
|||
|
||||
.info-container[data-v-6f42e5d7] {
|
||||
.info-container[data-v-f632a889] {
|
||||
margin-top: 2rem;
|
||||
gap: 12px;
|
||||
}
|
||||
.scan-time[data-v-6f42e5d7] {
|
||||
.scan-time[data-v-f632a889] {
|
||||
font-size: 0.95rem;
|
||||
color: var(--n-text-color);
|
||||
}
|
||||
.notes[data-v-6f42e5d7] {
|
||||
.notes[data-v-f632a889] {
|
||||
font-size: 0.9rem;
|
||||
line-height: 1.5;
|
||||
}
|
||||
.action-group[data-v-6f42e5d7] {
|
||||
.action-group[data-v-f632a889] {
|
||||
padding: 8px 0;
|
||||
}
|
||||
.action-btn[data-v-6f42e5d7] {
|
||||
.action-btn[data-v-f632a889] {
|
||||
transition: transform 0.2s ease;
|
||||
}
|
||||
.action-btn[data-v-6f42e5d7]:hover {
|
||||
.action-btn[data-v-f632a889]:hover {
|
||||
transform: translateY(-1px);
|
||||
}
|
||||
.category-title[data-v-6f42e5d7] {
|
||||
.category-title[data-v-f632a889] {
|
||||
margin: 1rem 0;
|
||||
color: var(--n-title-text-color);
|
||||
}
|
||||
.material-grid[data-v-6f42e5d7] {
|
||||
.material-grid[data-v-f632a889] {
|
||||
padding: 0 8px;
|
||||
}
|
||||
.material-card[data-v-6f42e5d7] {
|
||||
.material-card[data-v-f632a889] {
|
||||
padding: 4px;
|
||||
background: var(--n-color-modal);
|
||||
border-radius: 8px;
|
||||
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05);
|
||||
transition: box-shadow 0.2s ease;
|
||||
}
|
||||
.material-card[data-v-6f42e5d7]:hover {
|
||||
.material-card[data-v-f632a889]:hover {
|
||||
box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
|
||||
}
|
||||
.material-name[data-v-6f42e5d7] {
|
||||
.material-name[data-v-f632a889] {
|
||||
font-weight: 500;
|
||||
font-size: 1rem;
|
||||
}
|
||||
.material-count[data-v-6f42e5d7] {
|
||||
.material-count[data-v-f632a889] {
|
||||
font-size: 0.9rem;
|
||||
}
|
||||
|
|
2
ui/dist/assets/depot.js
vendored
|
@ -386,7 +386,7 @@ const _sfc_main = {
|
|||
};
|
||||
}
|
||||
};
|
||||
const depot = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-6f42e5d7"]]);
|
||||
const depot = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-f632a889"]]);
|
||||
export {
|
||||
depot as default
|
||||
};
|
||||
|
|
4
ui/dist/assets/index.css
vendored
|
@ -7,10 +7,10 @@
|
|||
pointer-events: none !important;
|
||||
}
|
||||
|
||||
.tabs[data-v-7f2c8f94] {
|
||||
.tabs[data-v-cac58959] {
|
||||
height: 100%;
|
||||
}
|
||||
.layout-container[data-v-7f2c8f94] {
|
||||
.layout-container[data-v-cac58959] {
|
||||
height: 100%;
|
||||
}
|
||||
html,
|
||||
|
|
50
ui/dist/assets/index.js
vendored
@@ -11130,12 +11130,22 @@ const seen = {};
 const __vitePreload = function preload(baseModule, deps, importerUrl) {
   let promise = Promise.resolve();
   if (deps && deps.length > 0) {
+    let allSettled2 = function(promises) {
+      return Promise.all(
+        promises.map(
+          (p2) => Promise.resolve(p2).then(
+            (value) => ({ status: "fulfilled", value }),
+            (reason) => ({ status: "rejected", reason })
+          )
+        )
+      );
+    };
     document.getElementsByTagName("link");
     const cspNonceMeta = document.querySelector(
       "meta[property=csp-nonce]"
     );
     const cspNonce = (cspNonceMeta == null ? void 0 : cspNonceMeta.nonce) || (cspNonceMeta == null ? void 0 : cspNonceMeta.getAttribute("nonce"));
-    promise = Promise.allSettled(
+    promise = allSettled2(
       deps.map((dep) => {
         dep = assetsURL(dep);
         if (dep in seen) return;
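The hunk above replaces the direct `Promise.allSettled` call with a locally defined `allSettled2`, so the preload helper no longer depends on `Promise.allSettled` being available in the browser. A minimal sketch of the same fallback pattern, written independently of the bundle (names and asset paths are illustrative):

// allSettled built from Promise.all: every promise is wrapped so it always
// fulfills with a { status, value } or { status, reason } record.
function allSettled(promises) {
  return Promise.all(
    promises.map((p) =>
      Promise.resolve(p).then(
        (value) => ({ status: "fulfilled", value }),
        (reason) => ({ status: "rejected", reason })
      )
    )
  );
}

// One failed asset no longer rejects the whole preload batch.
allSettled([fetch("/assets/depot.js"), fetch("/assets/missing.css")])
  .then((results) => results.forEach((r) => console.log(r.status)));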
@@ -23779,21 +23789,6 @@ const noop$1 = () => {
 const toFiniteNumber = (value, defaultValue) => {
   return value != null && Number.isFinite(value = +value) ? value : defaultValue;
 };
-const ALPHA = "abcdefghijklmnopqrstuvwxyz";
-const DIGIT = "0123456789";
-const ALPHABET = {
-  DIGIT,
-  ALPHA,
-  ALPHA_DIGIT: ALPHA + ALPHA.toUpperCase() + DIGIT
-};
-const generateString = (size2 = 16, alphabet = ALPHABET.ALPHA_DIGIT) => {
-  let str = "";
-  const { length } = alphabet;
-  while (size2--) {
-    str += alphabet[Math.random() * length | 0];
-  }
-  return str;
-};
 function isSpecCompliantForm(thing) {
   return !!(thing && isFunction(thing.append) && thing[Symbol.toStringTag] === "FormData" && thing[Symbol.iterator]);
 }
@@ -23892,8 +23887,6 @@ const utils$1 = {
   findKey,
   global: _global,
   isContextDefined,
-  ALPHABET,
-  generateString,
   isSpecCompliantForm,
   toJSONObject,
   isAsyncFn,
@@ -24849,8 +24842,9 @@ function isAbsoluteURL(url) {
 function combineURLs(baseURL, relativeURL) {
   return relativeURL ? baseURL.replace(/\/?\/$/, "") + "/" + relativeURL.replace(/^\/+/, "") : baseURL;
 }
-function buildFullPath(baseURL, requestedURL) {
-  if (baseURL && !isAbsoluteURL(requestedURL)) {
+function buildFullPath(baseURL, requestedURL, allowAbsoluteUrls) {
+  let isRelativeUrl = !isAbsoluteURL(requestedURL);
+  if (baseURL && (isRelativeUrl || allowAbsoluteUrls == false)) {
     return combineURLs(baseURL, requestedURL);
   }
   return requestedURL;
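The extra `allowAbsoluteUrls` parameter changes when `baseURL` is applied: before, an absolute `requestedURL` always bypassed `baseURL`; now an absolute URL is still forced onto `baseURL` whenever `allowAbsoluteUrls` is explicitly false. A standalone sketch of that behaviour (a simplified reimplementation, not the bundled code; it uses a strict `=== false` check where the bundle uses `== false`):

const isAbsoluteURL = (url) => /^([a-z][a-z\d+\-.]*:)?\/\//i.test(url);
const combineURLs = (base, rel) =>
  rel ? base.replace(/\/+$/, "") + "/" + rel.replace(/^\/+/, "") : base;

// Absolute URLs are only passed through when allowAbsoluteUrls is not disabled.
function buildFullPath(baseURL, requestedURL, allowAbsoluteUrls) {
  const isRelative = !isAbsoluteURL(requestedURL);
  if (baseURL && (isRelative || allowAbsoluteUrls === false)) {
    return combineURLs(baseURL, requestedURL);
  }
  return requestedURL;
}

console.log(buildFullPath("https://api.example.com", "/v1/depot", true));
// -> https://api.example.com/v1/depot
console.log(buildFullPath("https://api.example.com", "https://other.test/x", false));
// -> https://api.example.com/https://other.test/x (absolute URL no longer escapes the base)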
@@ -24937,7 +24931,7 @@ const resolveConfig = (config) => {
   const newConfig = mergeConfig$1({}, config);
   let { data, withXSRFToken, xsrfHeaderName, xsrfCookieName, headers, auth } = newConfig;
   newConfig.headers = headers = AxiosHeaders$1.from(headers);
-  newConfig.url = buildURL(buildFullPath(newConfig.baseURL, newConfig.url), config.params, config.paramsSerializer);
+  newConfig.url = buildURL(buildFullPath(newConfig.baseURL, newConfig.url, newConfig.allowAbsoluteUrls), config.params, config.paramsSerializer);
   if (auth) {
     headers.set(
       "Authorization",
@@ -25454,7 +25448,7 @@ function dispatchRequest(config) {
     return Promise.reject(reason);
   });
 }
-const VERSION$1 = "1.7.9";
+const VERSION$1 = "1.8.4";
 const validators$1 = {};
 ["object", "boolean", "number", "function", "string", "symbol"].forEach((type, i) => {
   validators$1[type] = function validator2(thing) {
@@ -25582,6 +25576,12 @@ let Axios$1 = class Axios {
       }, true);
     }
   }
+    if (config.allowAbsoluteUrls !== void 0) ;
+    else if (this.defaults.allowAbsoluteUrls !== void 0) {
+      config.allowAbsoluteUrls = this.defaults.allowAbsoluteUrls;
+    } else {
+      config.allowAbsoluteUrls = true;
+    }
     validator.assertOptions(config, {
       baseUrl: validators.spelling("baseURL"),
       withXsrfToken: validators.spelling("withXSRFToken")
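At request time the option is resolved in order: per-request config, then the instance defaults, then `true`. Assuming the bundled client follows the public axios 1.8.x API, usage would look roughly like this (the base URL is a made-up placeholder):

import axios from "axios";

// Instance-level default: absolute URLs in `url` can no longer escape the baseURL.
const api = axios.create({
  baseURL: "http://localhost:8000", // placeholder, not taken from this repository
  allowAbsoluteUrls: false,
});

api.get("/depot/info");
// -> http://localhost:8000/depot/info

// Per-request override takes precedence over the instance default.
api.get("https://example.com/x", { allowAbsoluteUrls: true });
// -> passed through as-is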
@@ -25652,7 +25652,7 @@ let Axios$1 = class Axios {
   }
   getUri(config) {
     config = mergeConfig$1(this.defaults, config);
-    const fullPath = buildFullPath(config.baseURL, config.url);
+    const fullPath = buildFullPath(config.baseURL, config.url, config.allowAbsoluteUrls);
     return buildURL(fullPath, config.params, config.paramsSerializer);
   }
 };
@@ -26087,7 +26087,7 @@ function getDefaultExportFromCjs(x) {
   return x && x.__esModule && Object.prototype.hasOwnProperty.call(x, "default") ? x["default"] : x;
 }
 function getAugmentedNamespace(n) {
-  if (n.__esModule) return n;
+  if (Object.prototype.hasOwnProperty.call(n, "__esModule")) return n;
   var f = n.default;
   if (typeof f == "function") {
     var a = function a2() {
@@ -28150,7 +28150,7 @@ const _sfc_main = {
     };
   }
 };
-const App = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-7f2c8f94"]]);
+const App = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-cac58959"]]);
 /*!
  * vue-router v4.5.0
  * (c) 2024 Eduardo San Martin Morote
2
ui/dist/assets/index3.js
vendored
@@ -1,5 +1,5 @@
 import { init, throttle } from "./install.js";
-import { defineComponent, h, shallowRef, inject, toRefs, computed, getCurrentInstance, watch, watchEffect, onMounted, onBeforeUnmount, isRef, unref, nextTick } from "./index.js";
+import { defineComponent, h, shallowRef, inject, toRefs, computed, getCurrentInstance, watch, watchEffect, onMounted, onBeforeUnmount, nextTick, isRef, unref } from "./index.js";
 const METHOD_NAMES = [
   "getWidth",
   "getHeight",
8
ui/dist/assets/paomadeng.css
vendored
@@ -1,17 +1,17 @@

 /* Alert styles */
-.custom-alert[data-v-01ea1f7d] {
+.custom-alert[data-v-a64db44c] {
   position: fixed;
   z-index: 5000;

   margin-top: 10px;
-  left: var(--abe189c8);
-  right: var(--abe189c8);
+  left: var(--1b8e3d43);
+  right: var(--1b8e3d43);

   background: linear-gradient(to right, rgba(0, 0, 0, 0), rgba(0, 0, 255, 0.5), rgba(0, 0, 0, 0));
 }

 /* Marquee styles */
-.custom-marquee[data-v-01ea1f7d] {
+.custom-marquee[data-v-a64db44c] {
   pointer-events: none;
 }
4
ui/dist/assets/paomadeng.js
vendored
@@ -294,7 +294,7 @@ const _sfc_main = {
   __name: "paomadeng",
   setup(__props) {
     useCssVars((_ctx) => ({
-      "abe189c8": margin_x.value
+      "1b8e3d43": margin_x.value
     }));
     const mower_store = useMowerStore();
     const { speed_msg } = storeToRefs(mower_store);
@@ -343,7 +343,7 @@ const _sfc_main = {
     };
   }
 };
-const paomadeng = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-01ea1f7d"]]);
+const paomadeng = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-a64db44c"]]);
 export {
   paomadeng as default
 };
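The "abe189c8" → "1b8e3d43" rename here is the same hash that changed in paomadeng.css above: it is the key Vue's SFC compiler generates for a `v-bind()` expression inside `<style>`, surfaced at runtime by `useCssVars` as a `--<hash>` custom property. A rough sketch of the pattern in hand-written form (the component, value, and mount point are illustrative, not the actual paomadeng component):

import { createApp, defineComponent, h, ref, useCssVars } from "vue";

// Roughly what the compiler emits for `v-bind(margin_x)` inside <style>:
// the hashed key becomes a --1b8e3d43 custom property on the component's root.
const Marquee = defineComponent({
  setup() {
    const margin_x = ref("2rem");
    useCssVars(() => ({ "1b8e3d43": margin_x.value }));
    return () => h("div", { class: "custom-alert" }, "marquee");
  },
});

createApp(Marquee).mount("#app");
// The scoped stylesheet can then read it, e.g. left: var(--1b8e3d43);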
4
ui/dist/assets/report.css
vendored
@@ -1,8 +1,8 @@

-.chart[data-v-291ce212] {
+.chart[data-v-d6be4beb] {
   height: 400px;
 }
-.report-card_1[data-v-291ce212] {
+.report-card_1[data-v-d6be4beb] {
   display: gird;
   flex-direction: column;
   align-items: center;
2
ui/dist/assets/report.js
vendored
@@ -565,7 +565,7 @@ const _sfc_main = {
     };
   }
 };
-const report = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-291ce212"]]);
+const report = /* @__PURE__ */ _export_sfc(_sfc_main, [["__scopeId", "data-v-d6be4beb"]]);
 export {
   report as default
 };
24
ui/dist/index.html
vendored
@@ -1,14 +1,14 @@
-<!doctype html>
-<html lang="zh-CN">
-<head>
-<meta charset="UTF-8" />
-<link rel="icon" href="/favicon.ico" />
-<meta name="viewport" content="width=device-width, initial-scale=1.0" />
-<title>mower-ng webui</title>
+<!doctype html>
+<html lang="zh-CN">
+<head>
+<meta charset="UTF-8" />
+<link rel="icon" href="/favicon.ico" />
+<meta name="viewport" content="width=device-width, initial-scale=1.0" />
+<title>mower-ng webui</title>
 <script type="module" crossorigin src="/assets/index.js"></script>
 <link rel="stylesheet" crossorigin href="/assets/index.css">
-</head>
-<body>
-<div id="app"></div>
-</body>
-</html>
+</head>
+<body>
+<div id="app"></div>
+</body>
+</html>
@@ -79,13 +79,9 @@ const stageAvailability = computed(() => {
   const availabilityMap = new Map()
   const allDaysAvailable = Array(7).fill(true) // available every day by default

-  // important and custom stages are available all week by default
+  // 1. important stages are available all week by default
   general_important_stages.forEach((stage) => availabilityMap.set(stage, allDaysAvailable))
-  conf.value.custom_stages.forEach((stage) => {
-    if (stage) availabilityMap.set(stage, allDaysAvailable)
-  })
-
   // non-important stages (materials, chips, etc.) take availability from time_table
   general_unimportant_stages.forEach((stage) => {
     let availableDays = allDaysAvailable // defaults to all week (e.g. 1-7)
     for (const [prefix, days] of Object.entries(time_table)) {
@@ -102,6 +98,12 @@ const stageAvailability = computed(() => {
     }
     availabilityMap.set(stage, availableDays)
   })

+  conf.value.custom_stages.forEach((stage) => {
+    if (stage) {
+      availabilityMap.set(stage, allDaysAvailable)
+    }
+  })
   return availabilityMap
 })
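Taken together, the two hunks only move the custom-stage handling to the end of the computed: important stages and custom stages get a full-week mask, while the remaining stages are matched against `time_table` by name prefix. A condensed sketch of the resulting shape; the prefix-matching body is outside these hunks, so the loop below is a guess at it, and the stage names and `time_table` entries are placeholders:

// Illustrative data; the real lists and time_table come from the component's config.
const general_important_stages = ["CE-6", "LS-6"];
const general_unimportant_stages = ["PR-A-2", "PR-B-2"];
const custom_stages = ["1-7", null];
const time_table = { "PR-A": [0, 3, 4, 6], "PR-B": [0, 1, 4, 5] }; // placeholder weekdays

function stageAvailability() {
  const availabilityMap = new Map();
  const allDaysAvailable = Array(7).fill(true);

  // 1. important stages are available all week
  general_important_stages.forEach((s) => availabilityMap.set(s, allDaysAvailable));

  // 2. non-important stages: open only on the weekdays listed for their prefix
  general_unimportant_stages.forEach((stage) => {
    let availableDays = allDaysAvailable;
    for (const [prefix, days] of Object.entries(time_table)) {
      if (stage.startsWith(prefix)) {
        availableDays = Array.from({ length: 7 }, (_, d) => days.includes(d));
        break;
      }
    }
    availabilityMap.set(stage, availableDays);
  });

  // 3. custom stages (moved to the end in this commit) are always available
  custom_stages.forEach((stage) => {
    if (stage) availabilityMap.set(stage, allDaysAvailable);
  });
  return availabilityMap;
}

console.log(stageAvailability().get("PR-A-2")); // e.g. [true, false, false, true, ...]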
@@ -110,18 +112,17 @@
 const plan = computed(() => {
   const result = []
   const currentPlan = conf.value.weekly_plan
-  const availabilityMap = stageAvailability.value

   // build one week of per-day state (available? enabled?) for a single stage, plus its overall selection state (all/some/empty)
   const generateWeekAndState = (stage, isCustom) => {
     const week = [] // per-day state for the seven days of the week
-    const available = availabilityMap.get(stage) ?? Array(7).fill(true) // availability of this stage
+    const available = stageAvailability.value.get(stage) ?? Array(7).fill(true)
     const planType = isCustom ? 'custom' : 'general' // general stage vs. custom stage
     let availableCount = 0 // days available this week
     let enabledCount = 0 // days enabled this week

     for (let dayIndex = 0; dayIndex < 7; ++dayIndex) {
-      const isAvailable = available[dayIndex]
+      const isAvailable = available[dayIndex] // always true for custom stages (including null)
       const isEnabled = currentPlan[dayIndex]?.[planType]?.includes(stage) ?? false // is this stage in that day's plan?
       week.push({
         available: isAvailable,
@@ -138,16 +139,14 @@ const plan = computed(() => {
     // compute the overall selection state of this stage row
     let selectionState = 'empty' // empty by default
     if (availableCount > 0) {
-      // only evaluated when the stage is available on at least one day this week
       if (enabledCount === 0) {
-        selectionState = 'empty' // no day enabled
+        selectionState = 'empty'
       } else if (enabledCount === availableCount) {
-        selectionState = 'all' // every available day enabled
+        selectionState = 'all'
       } else {
-        selectionState = 'some' // only some available days enabled
+        selectionState = 'some'
       }
     }

     return { week, selectionState }
   }

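Dropping the inline comments leaves the tri-state logic unchanged: a row is `empty`, `all`, or `some` depending on how many of its available days are enabled. A minimal standalone sketch of that rule:

// week: [{ available, enable }, ...] for the 7 days of one stage row.
function selectionState(week) {
  const availableCount = week.filter((d) => d.available).length;
  const enabledCount = week.filter((d) => d.available && d.enable).length;
  if (availableCount === 0 || enabledCount === 0) return "empty";
  return enabledCount === availableCount ? "all" : "some";
}

console.log(selectionState([{ available: true, enable: true }, { available: true, enable: false }])); // "some"
console.log(selectionState([{ available: false, enable: false }])); // "empty"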
@@ -158,12 +157,7 @@ const plan = computed(() => {
   conf.value.custom_stages.forEach((stage) =>
     result.push({
       stage,
-      ...(stage === null // special-case empty custom slots
-        ? {
-            week: Array(7).fill({ available: true, enable: false }),
-            selectionState: 'empty' // empty slots default to the empty state
-          }
-        : generateWeekAndState(stage, true)) // otherwise generate normally
+      ...generateWeekAndState(stage, true) // call generateWeekAndState for every custom stage (including null)
     })
   )
   general_unimportant_stages.forEach((stage) =>
@@ -177,6 +171,7 @@ const plan = computed(() => {
 function toggle_plan(stageIndex, dayIndex) {
   const stageInfo = plan.value[stageIndex]
   // do nothing if the stage info is invalid, the slot is empty, or the stage is closed that day
+  // for custom stages, stageInfo.week[dayIndex].available is always true
   if (!stageInfo || stageInfo.stage === null || !stageInfo.week[dayIndex].available) return

   const stage = stageInfo.stage
@@ -212,6 +207,7 @@ function select_all(stageIndex) {

   // iterate over the seven days of the week
   for (let dayIndex = 0; dayIndex < 7; ++dayIndex) {
+    // for custom stages, availabilityWeek[dayIndex].available is always true
     if (availabilityWeek[dayIndex].available) {
       // only touch days on which the stage is open
       const list = conf.value.weekly_plan[dayIndex][planType]
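Both `toggle_plan` and `select_all` can now rely on custom stages always reporting `available: true`, so the guards only skip empty slots and genuinely closed days before editing `weekly_plan[dayIndex][planType]`. The field names below follow the hunks above; the actual enable/disable mechanics are assumed, since they are not shown in the diff:

// weekly_plan: 7 entries, each { general: [...stageNames], custom: [...stageNames] }.
function toggleStage(weekly_plan, dayIndex, planType, stage) {
  const list = weekly_plan[dayIndex][planType];
  const i = list.indexOf(stage);
  if (i === -1) list.push(stage);   // enable the stage for that day
  else list.splice(i, 1);           // disable it again
}

const weekly_plan = Array.from({ length: 7 }, () => ({ general: [], custom: [] }));
toggleStage(weekly_plan, 0, "general", "CE-6");
console.log(weekly_plan[0].general); // ["CE-6"]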
@@ -431,6 +427,7 @@ const todayName = computed(() => weekday_display_name[todayIndex.value] || '') /
             <div>{{ a.name }}</div>
           </n-tag>
         </template>
+
         <div>
           结束时间:{{
             DateTime.fromSeconds(a.end)
@@ -448,6 +445,7 @@ const todayName = computed(() => weekday_display_name[todayIndex.value] || '') /
               .toHuman()
           }}
         </div>
+        <div>{{ a.drop }} 已有:{{ a.count }}</div>
       </n-tooltip>
     </template>
     <div v-else>当前暂无活动开放</div>