# -*- coding: utf-8 -*-
# @Time : 2021/12/24 19:54
# @Author : AuYang
# @File : pvz.py
# @Software : PyCharm
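"""Screen-capture bot for a Plants vs Zombies minigame: grabs the game window,
runs a YOLOv5-style detector to locate a 'shell' on screen, and clicks it with
pyautogui."""
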
import sys
import time
from threading import Thread

import cv2
import numpy as np
import pyautogui
import torch

from models.experimental import attempt_load
from utils.augmentations import letterbox
from utils.general import non_max_suppression, xyxy2xywh
from utils.plots import Annotator
from utils.torch_utils import select_device

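# Assumes the YOLOv5 repository layout (models/, utils/) is on the import path
# and that ./allBest.pt is the custom-trained detector used by this script.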
# Run inference on the CPU and load the trained detector weights.
device = select_device('cpu')
model = attempt_load('./allBest.pt', map_location=device)
names = model.module.names if hasattr(model, 'module') else model.names

# image = cv2.imread('pvzImgs/218.jpg')

# Locate the game window by its title (Plants vs Zombies, Chinese edition) and focus it.
win = pyautogui.getWindowsWithTitle('植物大战僵尸中文版')[0]
win.activate()
time.sleep(0.3)
start = time.time()


def getAShot():
    """Grab a screenshot of the game window as an OpenCV BGR image."""
    image = pyautogui.screenshot(region=win.box)
    return cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)


def getAShell(image):
    """Run the detector on a window screenshot and return the xyxy box of the
    first 'shell' found (scaled back to window pixels), else None."""
    with torch.no_grad():
        # Letterbox to the model input size, then convert HWC BGR -> CHW RGB.
        img = letterbox(image)[0]
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img)
        img = torch.from_numpy(img).to(device)
        img = img.float()  # uint8 to fp32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        pred = model(img, augment=False)[0]
        pred = non_max_suppression(pred, 0.50, 0.0, classes=None, agnostic=False)

        for det in pred:
            for *xyxy, conf, cls in reversed(det):
                # Scale from the 640-pixel letterboxed image back to window pixels.
                for index, i in enumerate(xyxy):
                    xyxy[index] = i * win.box.width / 640
                label = str(names[int(cls)])
                conf = round(float(conf), 2)
                if conf >= 0.5 and label == 'shell':
                    return xyxy
    return None


def fire():
    """Main detect-and-click loop.

    For the first 7.5 seconds only visualise detections; after that, click the
    detected shell, click the first throw target, wait for the next shell and
    click it onto the second target, then restart the timer.
    """
    global start
    while True:
        if time.time() - start < 7.5:
            # Warm-up window: just draw detections without clicking.
            image = getAShot()
            xyxy = getAShell(image)
            if xyxy:
                annotator = Annotator(image, line_width=2, example=str(names))
                annotator.box_label(xyxy, label='shell')
            draw(image)
            continue
        else:
            start = time.time()
            while True:
                image = getAShot()
                xyxy = getAShell(image)
                if xyxy and len(xyxy) >= 4:
                    xywh = xyxy2xywh(torch.tensor(xyxy).view(1, 4)).view(-1).tolist()
                    # print(xywh)
                    annotator = Annotator(image, line_width=2, example=str(names))
                    annotator.box_label(xyxy, label='shell')

                    # Click the shell, then the first target (offsets are
                    # fractions of the window width, as in the original).
                    pyautogui.moveTo(win.box.left + xywh[0], win.box.top + xywh[1])
                    pyautogui.click()
                    pyautogui.moveTo(win.box.left + win.box.width * 0.78,
                                     win.box.top + win.box.width * 0.3)
                    pyautogui.click()

                    time.sleep(0.5)
                    # Wait for the next shell and send it to the second target.
                    while True:
                        image = getAShot()
                        xyxy = getAShell(image)
                        if xyxy and len(xyxy) >= 4:
                            xywh = xyxy2xywh(torch.tensor(xyxy).view(1, 4)).view(-1).tolist()
                            # print(xywh)
                            annotator = Annotator(image, line_width=2, example=str(names))
                            annotator.box_label(xyxy, label='shell')

                            pyautogui.moveTo(win.box.left + xywh[0], win.box.top + xywh[1])
                            pyautogui.click()
                            pyautogui.moveTo(win.box.left + win.box.width * 0.78,
                                             win.box.top + win.box.width * 0.63)
                            pyautogui.click()
                        else:
                            continue
                        break
                    draw(image)
                    print("click")
                else:
                    continue
                break


def draw(img):
    """Show the (annotated) frame; press Esc in the window to quit."""
    cv2.imshow('img', img)
    k = cv2.waitKey(1)
    if k == 27:  # Esc
        cv2.destroyAllWindows()
        sys.exit(0)


def main():
    fireThread = Thread(target=fire)
    fireThread.start()
    fireThread.join()


if __name__ == '__main__':
    main()