# AI_agent_detect/config.py
from typing import List
import torch
from pydantic import BaseModel
# Device configuration
DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"
# Default detection parameters
DEFAULT_CONF = 0.25
DEFAULT_IOU = 0.5
DEFAULT_MIN_SIZE = 8
DEFAULT_POS_THRESH = 5
MODEL_CONFIGS = {
    "安全施工模型": {  # safe-construction / PPE model
        "model_path": "models/ppe_state_model/best.pt",
        "types": ["novest", "nohelmet"],
        "type_to_id": {"novest": 0, "nohelmet": 2},
        "params": {
            "enable_primary": True,
            "primary_conf": 0.55,
            "secondary_conf": 0.6,
            "final_conf": 0.65,
            "enable_multi_scale": True,
            "multi_scales": [0.75, 1.0, 1.25],
            "enable_secondary": True,
            "slice_size": 512,
            "overlap_ratio": 0.3,
            "weight_primary": 0.4,
            "weight_secondary": 0.6
        }
    },
    "烟雾火灾模型": {  # smoke-and-fire model
        "model_path": "models/fire_smoke_model/best.pt",
        "types": ["fire", "smoke"],
        "type_to_id": {"fire": 0, "smoke": 1},
        "params": {
            "enable_primary": True,
            "primary_conf": 0.99,
            "secondary_conf": 0.99,
            "final_conf": 0.99,
            "enable_multi_scale": True,
            "multi_scales": [0.75, 1.0, 1.25],
            "enable_secondary": True,
            "slice_size": 512,
            "overlap_ratio": 0.3,
            "weight_primary": 0.4,
            "weight_secondary": 0.6
        }
    }
}
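# --- Illustrative usage sketch (not part of the original config) ---
# Assumes the .pt checkpoints above are Ultralytics YOLO weights; the image path
# and the chosen model name below are picked purely for demonstration.
#   from ultralytics import YOLO
#   cfg = MODEL_CONFIGS["烟雾火灾模型"]
#   model = YOLO(cfg["model_path"])
#   results = model.predict(
#       "frame.jpg",
#       conf=cfg["params"]["primary_conf"],
#       iou=DEFAULT_IOU,
#       device=DEVICE,
#   )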
# SAHI adaptive slicing configuration
SLICE_RULES = [  # (min_total_pixels, (slice_size, overlap_ratio))
    (12_000_000, (384, 0.35)),
    (3_000_000, (512, 0.3)),
    (0, (640, 0.25))
]
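def pick_slice_params(width: int, height: int):
    """Illustrative helper, not in the original file: select SAHI slice size and
    overlap ratio from the image area. SLICE_RULES is assumed to be ordered by
    descending pixel threshold, so the first rule the area meets wins."""
    area = width * height
    for min_pixels, (slice_size, overlap_ratio) in SLICE_RULES:
        if area >= min_pixels:
            return slice_size, overlap_ratio
    # Unreachable with the rules above (last threshold is 0); kept as a guard.
    return SLICE_RULES[-1][1]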
class DetectionResponse(BaseModel):
    hasTarget: int
    originalImgSize: List[int]
    targets: List[dict]
    processing_errors: List[str] = []
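# Illustrative example (field values are invented, not from the original file):
#   DetectionResponse(
#       hasTarget=1,
#       originalImgSize=[1920, 1080],
#       targets=[{"type": "fire", "conf": 0.99, "bbox": [100, 200, 300, 400]}],
#   )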
# Agent-related settings
OLLAMA_MODEL = "alibayram/Qwen3-30B-A3B-Instruct-2507:latest"  # currently the strongest local model
OLLAMA_BASE_URL = "http://192.168.110.5:11434"
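# Illustrative sketch (not part of the original config): querying the Ollama
# server configured above through its standard /api/chat REST endpoint.
#   import requests
#   resp = requests.post(
#       f"{OLLAMA_BASE_URL}/api/chat",
#       json={
#           "model": OLLAMA_MODEL,
#           "messages": [{"role": "user", "content": "ping"}],
#           "stream": False,
#       },
#       timeout=60,
#   )
#   print(resp.json()["message"]["content"])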