# mower-ng/mower/scripts/build_ra_full_map.py
# 2025-07-08 17:20:42 +08:00
# 114 lines, 4.5 KiB, Python
import os
import cv2
import numpy as np
class Map:
    """One RA (Reclamation Algorithm) screenshot, warped to a flat map crop.

    The game renders the map with a perspective tilt. This class loads a raw
    1920x1080 screenshot, removes the tilt with a perspective transform, and
    crops away the surrounding HUD, leaving only the map area in ``self.map``.
    """

    def __init__(self, img_path):
        """Load *img_path*, undo the perspective tilt, and crop the map area.

        Args:
            img_path: Path to a raw 1920x1080 game screenshot.

        Raises:
            FileNotFoundError: if the image cannot be read.
        """
        self.img = cv2.imread(img_path)
        # cv2.imread signals failure by returning None instead of raising;
        # fail fast here rather than with a cryptic cv2.error below.
        if self.img is None:
            raise FileNotFoundError(f"Cannot read image: {img_path}")
        # The visible map trapezoid (wider at the bottom: x from -450 to 2370)
        # is mapped onto a flat 1920x1080 rectangle.
        self.src_pts = np.float32([[0, 0], [1920, 0], [-450, 1080], [2370, 1080]])
        self.dst_pts = np.float32([[0, 0], [1920, 0], [0, 1080], [1920, 1080]])
        self.trans_mat = cv2.getPerspectiveTransform(self.src_pts, self.dst_pts)
        self.map = cv2.warpPerspective(self.img, self.trans_mat, (1920, 1080))
        # Crop rows 137:993, cols 280:1640 — trims the UI border around the map.
        self.map = self.map[137:993, 280:1640]
def stitch_images(images):
    """Stitch *images* into a single panorama.

    Args:
        images: Sequence of BGR images (numpy arrays) with mutual overlap.

    Returns:
        The stitched image, or ``None`` if OpenCV's stitcher fails
        (an error message with the status code is printed in that case).
    """
    status, panorama = cv2.Stitcher_create().stitch(images)
    if status != cv2.Stitcher_OK:
        print(f"Error stitching images: {status}")
        return None
    return panorama
def _stitch_panorama(images):
    """Try OpenCV's panorama stitcher; return the result, or None on failure."""
    stitcher = cv2.Stitcher.create(cv2.Stitcher_PANORAMA)
    status, stitched = stitcher.stitch(images)
    if status == cv2.Stitcher_OK:
        return stitched
    print(f"Stitching failed with status {status}")
    return None


def _concat_horizontally(images):
    """Naive fallback: place images side by side on a black canvas.

    NOTE(review): this ignores the overlap between consecutive captures, so
    seams will be duplicated; a keypoint-based stitcher is preferred.
    """
    total_width = sum(img.shape[1] for img in images)
    max_height = max(img.shape[0] for img in images)
    canvas = np.zeros((max_height, total_width, 3), dtype=np.uint8)
    current_x = 0
    for img in images:
        h, w, _ = img.shape
        canvas[0:h, current_x : current_x + w, :] = img
        current_x += w  # TODO: advance by (w - overlap) once overlap is known
    return canvas


def main():
    """Build the full RA map from per-screen captures.

    Reads every ``.png`` from the raw directory (sorted by filename),
    perspective-corrects each via ``Map``, then stitches them into one image.
    Falls back to naive horizontal concatenation if OpenCV stitching fails.
    """
    raw_dir = "/home/zhao/Documents/sc/生息重写/full_map/raw"
    output_path = "/home/zhao/Documents/sc/生息重写/full_map/full.png"

    image_files = sorted(
        os.path.join(raw_dir, f) for f in os.listdir(raw_dir) if f.endswith(".png")
    )
    transformed_maps = [Map(img_file).map for img_file in image_files]

    if not transformed_maps:
        print("No images found to process.")
        return
    if len(transformed_maps) == 1:
        cv2.imwrite(output_path, transformed_maps[0])
        print(f"Single image processed and saved to {output_path}")
        return

    try:
        stitched = _stitch_panorama(transformed_maps)
        if stitched is not None:
            cv2.imwrite(output_path, stitched)
            print(f"Stitched image saved to {output_path}")
        else:
            # Fallback to manual stitching if OpenCV stitcher fails.
            print("Falling back to manual stitching.")
            cv2.imwrite(output_path, _concat_horizontally(transformed_maps))
            print(f"Manually stitched image saved to {output_path}")
    except cv2.error as e:
        print(f"An OpenCV error occurred: {e}")
        print(
            "Stitching with the default stitcher failed. You might need a more advanced approach."
        )
# Script entry point: build the full map only when run directly, not on import.
if __name__ == "__main__":
    main()