Mirror of https://github.com/dptech-corp/Uni-Lab-OS.git
Synced 2026-02-12 18:55:11 +00:00

Compare commits: feat/sampl ... dev (13 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 07cf690897 | |
| | cfea27460a | |
| | b7d3e980a9 | |
| | f9ed6cb3fb | |
| | 699a0b3ce7 | |
| | cf3a20ae79 | |
| | cdf0652020 | |
| | 60073ff139 | |
| | a9053b822f | |
| | d238c2ab8b | |
| | 9a7d5c7c82 | |
| | 4f7d431c0b | |
| | 341a1b537c | |
@@ -171,6 +171,12 @@ def parse_args():
        action="store_true",
        help="Disable sending update feedback to server",
    )
    parser.add_argument(
        "--test_mode",
        action="store_true",
        default=False,
        help="Test mode: all actions simulate execution and return mock results without running real hardware",
    )
    # workflow upload subcommand
    workflow_parser = subparsers.add_parser(
        "workflow_upload",
@@ -204,6 +210,12 @@ def parse_args():
        default=False,
        help="Whether to publish the workflow (default: False)",
    )
    workflow_parser.add_argument(
        "--description",
        type=str,
        default="",
        help="Workflow description, used when publishing the workflow",
    )
    return parser

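The two flags added here are plain argparse boolean switches. A minimal, runnable sketch of how they parse; the `--no_update_feedback` name is inferred from the later `args_dict.get("no_update_feedback", ...)` line, and the parser itself is illustrative, not the full Uni-Lab CLI:

import argparse

# Minimal sketch: only the flags touched by this commit, not the full Uni-Lab CLI.
parser = argparse.ArgumentParser(prog="unilab")
parser.add_argument(
    "--no_update_feedback",
    action="store_true",
    help="Disable sending update feedback to server",
)
parser.add_argument(
    "--test_mode",
    action="store_true",
    default=False,
    help="Test mode: simulate all actions and return mock results without real hardware",
)

args = parser.parse_args(["--test_mode"])
print(args.test_mode)           # True
print(args.no_update_feedback)  # False: store_true flags default to False when omitted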
@@ -231,52 +243,60 @@ def main():
|
||||
# 加载配置文件,优先加载config,然后从env读取
|
||||
config_path = args_dict.get("config")
|
||||
|
||||
if check_mode:
|
||||
args_dict["working_dir"] = os.path.abspath(os.getcwd())
|
||||
# 当 skip_env_check 时,默认使用当前目录作为 working_dir
|
||||
if skip_env_check and not args_dict.get("working_dir") and not config_path:
|
||||
# === 解析 working_dir ===
|
||||
# 规则1: working_dir 传入 → 检测 unilabos_data 子目录,已是则不修改
|
||||
# 规则2: 仅 config_path 传入 → 用其父目录作为 working_dir
|
||||
# 规则4: 两者都传入 → 各用各的,但 working_dir 仍做 unilabos_data 子目录检测
|
||||
raw_working_dir = args_dict.get("working_dir")
|
||||
if raw_working_dir:
|
||||
working_dir = os.path.abspath(raw_working_dir)
|
||||
elif config_path and os.path.exists(config_path):
|
||||
working_dir = os.path.dirname(os.path.abspath(config_path))
|
||||
else:
|
||||
working_dir = os.path.abspath(os.getcwd())
|
||||
print_status(f"跳过环境检查模式:使用当前目录作为工作目录 {working_dir}", "info")
|
||||
# 检查当前目录是否有 local_config.py
|
||||
local_config_in_cwd = os.path.join(working_dir, "local_config.py")
|
||||
if os.path.exists(local_config_in_cwd):
|
||||
config_path = local_config_in_cwd
|
||||
|
||||
# unilabos_data 子目录自动检测
|
||||
if os.path.basename(working_dir) != "unilabos_data":
|
||||
unilabos_data_sub = os.path.join(working_dir, "unilabos_data")
|
||||
if os.path.isdir(unilabos_data_sub):
|
||||
working_dir = unilabos_data_sub
|
||||
elif not raw_working_dir and not (config_path and os.path.exists(config_path)):
|
||||
# 未显式指定路径,默认使用 cwd/unilabos_data
|
||||
working_dir = os.path.abspath(os.path.join(os.getcwd(), "unilabos_data"))
|
||||
|
||||
# === 解析 config_path ===
|
||||
if config_path and not os.path.exists(config_path):
|
||||
# config_path 传入但不存在,尝试在 working_dir 中查找
|
||||
candidate = os.path.join(working_dir, "local_config.py")
|
||||
if os.path.exists(candidate):
|
||||
config_path = candidate
|
||||
print_status(f"在工作目录中发现配置文件: {config_path}", "info")
|
||||
else:
|
||||
print_status(
|
||||
f"配置文件 {config_path} 不存在,工作目录 {working_dir} 中也未找到 local_config.py,"
|
||||
f"请通过 --config 传入 local_config.py 文件路径",
|
||||
"error",
|
||||
)
|
||||
os._exit(1)
|
||||
elif not config_path:
|
||||
# 规则3: 未传入 config_path,尝试 working_dir/local_config.py
|
||||
candidate = os.path.join(working_dir, "local_config.py")
|
||||
if os.path.exists(candidate):
|
||||
config_path = candidate
|
||||
print_status(f"发现本地配置文件: {config_path}", "info")
|
||||
else:
|
||||
print_status(f"未指定config路径,可通过 --config 传入 local_config.py 文件路径", "info")
|
||||
elif os.getcwd().endswith("unilabos_data"):
|
||||
working_dir = os.path.abspath(os.getcwd())
|
||||
else:
|
||||
working_dir = os.path.abspath(os.path.join(os.getcwd(), "unilabos_data"))
|
||||
|
||||
if args_dict.get("working_dir"):
|
||||
working_dir = args_dict.get("working_dir", "")
|
||||
if config_path and not os.path.exists(config_path):
|
||||
config_path = os.path.join(working_dir, "local_config.py")
|
||||
if not os.path.exists(config_path):
|
||||
print_status(
|
||||
f"当前工作目录 {working_dir} 未找到local_config.py,请通过 --config 传入 local_config.py 文件路径",
|
||||
"error",
|
||||
print_status(f"您是否为第一次使用?并将当前路径 {working_dir} 作为工作目录? (Y/n)", "info")
|
||||
if check_mode or input() != "n":
|
||||
os.makedirs(working_dir, exist_ok=True)
|
||||
config_path = os.path.join(working_dir, "local_config.py")
|
||||
shutil.copy(
|
||||
os.path.join(os.path.dirname(os.path.dirname(__file__)), "config", "example_config.py"),
|
||||
config_path,
|
||||
)
|
||||
print_status(f"已创建 local_config.py 路径: {config_path}", "info")
|
||||
else:
|
||||
os._exit(1)
|
||||
elif config_path and os.path.exists(config_path):
|
||||
working_dir = os.path.dirname(config_path)
|
||||
elif os.path.exists(working_dir) and os.path.exists(os.path.join(working_dir, "local_config.py")):
|
||||
config_path = os.path.join(working_dir, "local_config.py")
|
||||
elif not skip_env_check and not config_path and (
|
||||
not os.path.exists(working_dir) or not os.path.exists(os.path.join(working_dir, "local_config.py"))
|
||||
):
|
||||
print_status(f"未指定config路径,可通过 --config 传入 local_config.py 文件路径", "info")
|
||||
print_status(f"您是否为第一次使用?并将当前路径 {working_dir} 作为工作目录? (Y/n)", "info")
|
||||
if input() != "n":
|
||||
os.makedirs(working_dir, exist_ok=True)
|
||||
config_path = os.path.join(working_dir, "local_config.py")
|
||||
shutil.copy(
|
||||
os.path.join(os.path.dirname(os.path.dirname(__file__)), "config", "example_config.py"), config_path
|
||||
)
|
||||
print_status(f"已创建 local_config.py 路径: {config_path}", "info")
|
||||
else:
|
||||
os._exit(1)
|
||||
|
||||
    # 加载配置文件 (check_mode 跳过)
    print_status(f"当前工作目录为 {working_dir}", "info")
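A condensed sketch of the working_dir/config_path precedence rules spelled out in the comments above (skip-env-check branch only; the function name and return shape are illustrative, not part of the codebase):

import os
from typing import Optional, Tuple

def resolve_paths(working_dir: Optional[str], config_path: Optional[str]) -> Tuple[str, Optional[str]]:
    """Illustrative restatement of rules 1-4 from the comments above."""
    # Rules 1/4: an explicit working_dir wins; rule 2: fall back to the config file's parent directory.
    if working_dir:
        wd = os.path.abspath(working_dir)
    elif config_path and os.path.exists(config_path):
        wd = os.path.dirname(os.path.abspath(config_path))
    else:
        wd = os.path.abspath(os.getcwd())

    # unilabos_data sub-directory auto-detection.
    if os.path.basename(wd) != "unilabos_data":
        sub = os.path.join(wd, "unilabos_data")
        if os.path.isdir(sub):
            wd = sub
        elif not working_dir and not (config_path and os.path.exists(config_path)):
            # Rule 3: nothing explicit was given, default to cwd/unilabos_data.
            wd = os.path.abspath(os.path.join(os.getcwd(), "unilabos_data"))

    # config_path fallback: look for local_config.py inside the resolved working_dir.
    if not (config_path and os.path.exists(config_path)):
        candidate = os.path.join(wd, "local_config.py")
        config_path = candidate if os.path.exists(candidate) else None
    return wd, config_path

print(resolve_paths(None, None))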
@@ -288,7 +308,9 @@ def main():
|
||||
|
||||
if hasattr(BasicConfig, "log_level"):
|
||||
logger.info(f"Log level set to '{BasicConfig.log_level}' from config file.")
|
||||
configure_logger(loglevel=BasicConfig.log_level, working_dir=working_dir)
|
||||
file_path = configure_logger(loglevel=BasicConfig.log_level, working_dir=working_dir)
|
||||
if file_path is not None:
|
||||
logger.info(f"[LOG_FILE] {file_path}")
|
||||
|
||||
if args.addr != parser.get_default("addr"):
|
||||
if args.addr == "test":
|
||||
@@ -332,6 +354,9 @@ def main():
|
||||
BasicConfig.slave_no_host = args_dict.get("slave_no_host", False)
|
||||
BasicConfig.upload_registry = args_dict.get("upload_registry", False)
|
||||
BasicConfig.no_update_feedback = args_dict.get("no_update_feedback", False)
|
||||
BasicConfig.test_mode = args_dict.get("test_mode", False)
|
||||
if BasicConfig.test_mode:
|
||||
print_status("启用测试模式:所有动作将模拟执行,不调用真实硬件", "warning")
|
||||
BasicConfig.communication_protocol = "websocket"
|
||||
machine_name = os.popen("hostname").read().strip()
|
||||
machine_name = "".join([c if c.isalnum() or c == "_" else "_" for c in machine_name])
|
||||
|
||||
@@ -38,9 +38,9 @@ def register_devices_and_resources(lab_registry, gather_only=False) -> Optional[
        response = http_client.resource_registry({"resources": list(devices_to_register.values())})
        cost_time = time.time() - start_time
        if response.status_code in [200, 201]:
-           logger.info(f"[UniLab Register] 成功注册 {len(devices_to_register)} 个设备 {cost_time}ms")
+           logger.info(f"[UniLab Register] 成功注册 {len(devices_to_register)} 个设备 {cost_time}s")
        else:
-           logger.error(f"[UniLab Register] 设备注册失败: {response.status_code}, {response.text} {cost_time}ms")
+           logger.error(f"[UniLab Register] 设备注册失败: {response.status_code}, {response.text} {cost_time}s")
    except Exception as e:
        logger.error(f"[UniLab Register] 设备注册异常: {e}")

@@ -51,9 +51,9 @@ def register_devices_and_resources(lab_registry, gather_only=False) -> Optional[
        response = http_client.resource_registry({"resources": list(resources_to_register.values())})
        cost_time = time.time() - start_time
        if response.status_code in [200, 201]:
-           logger.info(f"[UniLab Register] 成功注册 {len(resources_to_register)} 个资源 {cost_time}ms")
+           logger.info(f"[UniLab Register] 成功注册 {len(resources_to_register)} 个资源 {cost_time}s")
        else:
-           logger.error(f"[UniLab Register] 资源注册失败: {response.status_code}, {response.text} {cost_time}ms")
+           logger.error(f"[UniLab Register] 资源注册失败: {response.status_code}, {response.text} {cost_time}s")
    except Exception as e:
        logger.error(f"[UniLab Register] 资源注册异常: {e}")

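The edits above fix a unit label: `time.time() - start_time` yields seconds, so the old `ms` suffix was misleading. A tiny stand-alone illustration (not from the repository):

import time

start_time = time.time()
time.sleep(0.25)                      # stand-in for the registration HTTP call
cost_time = time.time() - start_time  # a float number of seconds

print(f"成功注册 3 个设备 {cost_time:.3f}s")          # correct unit
print(f"成功注册 3 个设备 {cost_time * 1000:.0f}ms")  # scale explicitly if milliseconds are wanted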
@@ -343,9 +343,10 @@ class HTTPClient:
|
||||
edges: List[Dict[str, Any]],
|
||||
tags: Optional[List[str]] = None,
|
||||
published: bool = False,
|
||||
description: str = "",
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
导入工作流到服务器
|
||||
导入工作流到服务器,如果 published 为 True,则额外发起发布请求
|
||||
|
||||
Args:
|
||||
name: 工作流名称(顶层)
|
||||
@@ -355,6 +356,7 @@ class HTTPClient:
|
||||
edges: 工作流边列表
|
||||
tags: 工作流标签列表,默认为空列表
|
||||
published: 是否发布工作流,默认为False
|
||||
description: 工作流描述,发布时使用
|
||||
|
||||
Returns:
|
||||
Dict: API响应数据,包含 code 和 data (uuid, name)
|
||||
@@ -367,7 +369,6 @@ class HTTPClient:
|
||||
"nodes": nodes,
|
||||
"edges": edges,
|
||||
"tags": tags if tags is not None else [],
|
||||
"published": published,
|
||||
},
|
||||
}
|
||||
# 保存请求到文件
|
||||
@@ -388,11 +389,51 @@ class HTTPClient:
            res = response.json()
            if "code" in res and res["code"] != 0:
                logger.error(f"导入工作流失败: {response.text}")
                return res
            # 导入成功后,如果需要发布则额外发起发布请求
            if published:
                imported_uuid = res.get("data", {}).get("uuid", workflow_uuid)
                publish_res = self.workflow_publish(imported_uuid, description)
                res["publish_result"] = publish_res
            return res
        else:
            logger.error(f"导入工作流失败: {response.status_code}, {response.text}")
            return {"code": response.status_code, "message": response.text}

    def workflow_publish(self, workflow_uuid: str, description: str = "") -> Dict[str, Any]:
        """
        发布工作流

        Args:
            workflow_uuid: 工作流UUID
            description: 工作流描述

        Returns:
            Dict: API响应数据
        """
        payload = {
            "uuid": workflow_uuid,
            "description": description,
            "published": True,
        }
        logger.info(f"正在发布工作流: {workflow_uuid}")
        response = requests.patch(
            f"{self.remote_addr}/lab/workflow/owner",
            json=payload,
            headers={"Authorization": f"Lab {self.auth}"},
            timeout=60,
        )
        if response.status_code == 200:
            res = response.json()
            if "code" in res and res["code"] != 0:
                logger.error(f"发布工作流失败: {response.text}")
            else:
                logger.info(f"工作流发布成功: {workflow_uuid}")
            return res
        else:
            logger.error(f"发布工作流失败: {response.status_code}, {response.text}")
            return {"code": response.status_code, "message": response.text}


# 创建默认客户端实例
http_client = HTTPClient()

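A small illustrative helper (not part of the repository) showing how a caller might read the dict produced by the import-then-publish flow above, where `publish_result` is attached only when `published=True`:

from typing import Any, Dict

def summarize_workflow_upload(res: Dict[str, Any]) -> str:
    """Summarize the response shape produced by the import + optional publish flow above."""
    if res.get("code") not in (0, None):
        return f"import failed: {res.get('message', res)}"
    workflow_uuid = res.get("data", {}).get("uuid", "<unknown>")
    publish = res.get("publish_result")
    if publish is None:
        return f"imported {workflow_uuid} (not published)"
    if publish.get("code") == 0:
        return f"imported and published {workflow_uuid}"
    return f"imported {workflow_uuid}, but publish failed: {publish.get('message', publish)}"

# Example with a mocked response:
print(summarize_workflow_upload({"code": 0, "data": {"uuid": "wf-1"}, "publish_result": {"code": 0}}))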
@@ -76,6 +76,7 @@ class JobInfo:
|
||||
start_time: float
|
||||
last_update_time: float = field(default_factory=time.time)
|
||||
ready_timeout: Optional[float] = None # READY状态的超时时间
|
||||
always_free: bool = False # 是否为永久闲置动作(不受排队限制)
|
||||
|
||||
def update_timestamp(self):
|
||||
"""更新最后更新时间"""
|
||||
@@ -127,6 +128,15 @@ class DeviceActionManager:
|
||||
# 总是将job添加到all_jobs中
|
||||
self.all_jobs[job_info.job_id] = job_info
|
||||
|
||||
# always_free的动作不受排队限制,直接设为READY
|
||||
if job_info.always_free:
|
||||
job_info.status = JobStatus.READY
|
||||
job_info.update_timestamp()
|
||||
job_info.set_ready_timeout(10)
|
||||
job_log = format_job_log(job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name)
|
||||
logger.trace(f"[DeviceActionManager] Job {job_log} always_free, start immediately")
|
||||
return True
|
||||
|
||||
# 检查是否有正在执行或准备执行的任务
|
||||
if device_key in self.active_jobs:
|
||||
# 有正在执行或准备执行的任务,加入队列
|
||||
@@ -176,11 +186,15 @@ class DeviceActionManager:
|
||||
logger.error(f"[DeviceActionManager] Job {job_log} is not in READY status, current: {job_info.status}")
|
||||
return False
|
||||
|
||||
# 检查设备上是否是这个job
|
||||
if device_key not in self.active_jobs or self.active_jobs[device_key].job_id != job_id:
|
||||
job_log = format_job_log(job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name)
|
||||
logger.error(f"[DeviceActionManager] Job {job_log} is not the active job for {device_key}")
|
||||
return False
|
||||
# always_free的job不需要检查active_jobs
|
||||
if not job_info.always_free:
|
||||
# 检查设备上是否是这个job
|
||||
if device_key not in self.active_jobs or self.active_jobs[device_key].job_id != job_id:
|
||||
job_log = format_job_log(
|
||||
job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name
|
||||
)
|
||||
logger.error(f"[DeviceActionManager] Job {job_log} is not the active job for {device_key}")
|
||||
return False
|
||||
|
||||
# 开始执行任务,将状态从READY转换为STARTED
|
||||
job_info.status = JobStatus.STARTED
|
||||
@@ -203,6 +217,13 @@ class DeviceActionManager:
|
||||
job_info = self.all_jobs[job_id]
|
||||
device_key = job_info.device_action_key
|
||||
|
||||
# always_free的job直接清理,不影响队列
|
||||
if job_info.always_free:
|
||||
job_info.status = JobStatus.ENDED
|
||||
job_info.update_timestamp()
|
||||
del self.all_jobs[job_id]
|
||||
return None
|
||||
|
||||
# 移除活跃任务
|
||||
if device_key in self.active_jobs and self.active_jobs[device_key].job_id == job_id:
|
||||
del self.active_jobs[device_key]
|
||||
@@ -234,9 +255,14 @@ class DeviceActionManager:
|
||||
return None
|
||||
|
||||
    def get_active_jobs(self) -> List[JobInfo]:
-       """获取所有正在执行的任务"""
+       """获取所有正在执行的任务(含active_jobs和always_free的STARTED job)"""
        with self.lock:
-           return list(self.active_jobs.values())
+           jobs = list(self.active_jobs.values())
+           # 补充 always_free 的 STARTED job(它们不在 active_jobs 中)
+           for job in self.all_jobs.values():
+               if job.always_free and job.status == JobStatus.STARTED and job not in jobs:
+                   jobs.append(job)
+           return jobs

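The updated get_active_jobs is effectively a union over two job sources. A stripped-down sketch with plain dicts standing in for JobInfo (types and field names here are simplified, not the real ones):

# Stand-in for the JobStatus enum value used in the real manager.
STARTED = "STARTED"

def collect_active_jobs(active_jobs: dict, all_jobs: dict) -> list:
    """Union of per-device active jobs and always_free jobs that are already STARTED."""
    jobs = list(active_jobs.values())
    for job in all_jobs.values():
        if job.get("always_free") and job.get("status") == STARTED and job not in jobs:
            jobs.append(job)
    return jobs

active = {"/devices/pump/transfer": {"job_id": "a", "status": STARTED}}
everything = {
    "a": {"job_id": "a", "status": STARTED},                       # tracked via active_jobs
    "b": {"job_id": "b", "status": STARTED, "always_free": True},  # bypassed the queue, only in all_jobs
}
print(len(collect_active_jobs(active, everything)))  # 2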
def get_queued_jobs(self) -> List[JobInfo]:
|
||||
"""获取所有排队中的任务"""
|
||||
@@ -261,6 +287,14 @@ class DeviceActionManager:
|
||||
job_info = self.all_jobs[job_id]
|
||||
device_key = job_info.device_action_key
|
||||
|
||||
# always_free的job直接清理
|
||||
if job_info.always_free:
|
||||
job_info.status = JobStatus.ENDED
|
||||
del self.all_jobs[job_id]
|
||||
job_log = format_job_log(job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name)
|
||||
logger.trace(f"[DeviceActionManager] Always-free job {job_log} cancelled")
|
||||
return True
|
||||
|
||||
# 如果是正在执行的任务
|
||||
if device_key in self.active_jobs and self.active_jobs[device_key].job_id == job_id:
|
||||
# 清理active job状态
|
||||
@@ -334,13 +368,18 @@ class DeviceActionManager:
|
||||
timeout_jobs = []
|
||||
|
||||
with self.lock:
|
||||
# 统计READY状态的任务数量
|
||||
ready_jobs_count = sum(1 for job in self.active_jobs.values() if job.status == JobStatus.READY)
|
||||
# 收集所有需要检查的 READY 任务(active_jobs + always_free READY jobs)
|
||||
ready_candidates = list(self.active_jobs.values())
|
||||
for job in self.all_jobs.values():
|
||||
if job.always_free and job.status == JobStatus.READY and job not in ready_candidates:
|
||||
ready_candidates.append(job)
|
||||
|
||||
ready_jobs_count = sum(1 for job in ready_candidates if job.status == JobStatus.READY)
|
||||
if ready_jobs_count > 0:
|
||||
logger.trace(f"[DeviceActionManager] Checking {ready_jobs_count} READY jobs for timeout") # type: ignore # noqa: E501
|
||||
|
||||
# 找到所有超时的READY任务(只检测,不处理)
|
||||
for job_info in self.active_jobs.values():
|
||||
for job_info in ready_candidates:
|
||||
if job_info.is_ready_timeout():
|
||||
timeout_jobs.append(job_info)
|
||||
job_log = format_job_log(
|
||||
@@ -608,6 +647,24 @@ class MessageProcessor:
|
||||
if host_node:
|
||||
host_node.handle_pong_response(pong_data)
|
||||
|
||||
def _check_action_always_free(self, device_id: str, action_name: str) -> bool:
|
||||
"""检查该action是否标记为always_free,通过HostNode统一的_action_value_mappings查找"""
|
||||
try:
|
||||
host_node = HostNode.get_instance(0)
|
||||
if not host_node:
|
||||
return False
|
||||
# noinspection PyProtectedMember
|
||||
action_mappings = host_node._action_value_mappings.get(device_id)
|
||||
if not action_mappings:
|
||||
return False
|
||||
# 尝试直接匹配或 auto- 前缀匹配
|
||||
for key in [action_name, f"auto-{action_name}"]:
|
||||
if key in action_mappings:
|
||||
return action_mappings[key].get("always_free", False)
|
||||
return False
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
async def _handle_query_action_state(self, data: Dict[str, Any]):
|
||||
"""处理query_action_state消息"""
|
||||
device_id = data.get("device_id", "")
|
||||
@@ -622,6 +679,9 @@ class MessageProcessor:
|
||||
|
||||
device_action_key = f"/devices/{device_id}/{action_name}"
|
||||
|
||||
# 检查action是否为always_free
|
||||
action_always_free = self._check_action_always_free(device_id, action_name)
|
||||
|
||||
# 创建任务信息
|
||||
job_info = JobInfo(
|
||||
job_id=job_id,
|
||||
@@ -631,6 +691,7 @@ class MessageProcessor:
|
||||
device_action_key=device_action_key,
|
||||
status=JobStatus.QUEUE,
|
||||
start_time=time.time(),
|
||||
always_free=action_always_free,
|
||||
)
|
||||
|
||||
# 添加到设备管理器
|
||||
@@ -1123,6 +1184,11 @@ class QueueProcessor:
|
||||
logger.debug(f"[QueueProcessor] Sending busy status for {len(queued_jobs)} queued jobs")
|
||||
|
||||
for job_info in queued_jobs:
|
||||
# 快照可能已过期:在遍历过程中 end_job() 可能已将此 job 移至 READY,
|
||||
# 此时不应再发送 busy/need_more,否则会覆盖已发出的 free=True 通知
|
||||
if job_info.status != JobStatus.QUEUE:
|
||||
continue
|
||||
|
||||
message = {
|
||||
"action": "report_action_state",
|
||||
"data": {
|
||||
|
||||
@@ -23,6 +23,7 @@ class BasicConfig:
|
||||
disable_browser = False # 禁止浏览器自动打开
|
||||
port = 8002 # 本地HTTP服务
|
||||
check_mode = False # CI 检查模式,用于验证 registry 导入和文件一致性
|
||||
test_mode = False # 测试模式,所有动作不实际执行,返回模拟结果
|
||||
# 'TRACE', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'
|
||||
log_level: Literal["TRACE", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] = "DEBUG"
|
||||
|
||||
@@ -145,5 +146,5 @@ def load_config(config_path=None):
|
||||
traceback.print_exc()
|
||||
exit(1)
|
||||
else:
|
||||
config_path = os.path.join(os.path.dirname(__file__), "local_config.py")
|
||||
config_path = os.path.join(os.path.dirname(__file__), "example_config.py")
|
||||
load_config(config_path)
|
||||
|
||||
@@ -21,7 +21,7 @@ from pylabrobot.resources import (
|
||||
ResourceHolder,
|
||||
Lid,
|
||||
Trash,
|
||||
Tip,
|
||||
Tip, TubeRack,
|
||||
)
|
||||
from typing_extensions import TypedDict
|
||||
|
||||
@@ -696,10 +696,13 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware):
|
||||
|
||||
如果 liquid_names 和 volumes 为空,但 plate 和 well_names 不为空,直接返回 plate 和 wells。
|
||||
"""
|
||||
assert issubclass(plate.__class__, Plate), "plate must be a Plate"
|
||||
plate: Plate = cast(Plate, cast(Resource, plate))
|
||||
assert issubclass(plate.__class__, (Plate, TubeRack)), f"plate must be a Plate or TubeRack, now: {type(plate)}"
|
||||
plate: Union[Plate, TubeRack]
|
||||
# 根据 well_names 获取对应的 Well 对象
|
||||
wells = [plate.get_well(name) for name in well_names]
|
||||
if issubclass(plate.__class__, Plate):
|
||||
wells = [plate.get_well(name) for name in well_names]
|
||||
elif issubclass(plate.__class__, TubeRack):
|
||||
wells = [plate.get_tube(name) for name in well_names]
|
||||
res_volumes = []
|
||||
|
||||
# 如果 liquid_names 和 volumes 都为空,直接返回
|
||||
|
||||
@@ -91,7 +91,7 @@ class PRCXI9300Deck(Deck):
|
||||
"""
|
||||
|
||||
def __init__(self, name: str, size_x: float, size_y: float, size_z: float, **kwargs):
|
||||
super().__init__(name, size_x, size_y, size_z)
|
||||
super().__init__(size_x, size_y, size_z, name)
|
||||
self.slots = [None] * 16 # PRCXI 9300/9320 最大有 16 个槽位
|
||||
self.slot_locations = [Coordinate(0, 0, 0)] * 16
|
||||
|
||||
@@ -248,14 +248,15 @@ class PRCXI9300TipRack(TipRack):
|
||||
if ordered_items is not None:
|
||||
items = ordered_items
|
||||
elif ordering is not None:
|
||||
# 检查 ordering 中的值是否是字符串(从 JSON 反序列化时的情况)
|
||||
# 如果是字符串,说明这是位置名称,需要让 TipRack 自己创建 Tip 对象
|
||||
# 我们只传递位置信息(键),不传递值,使用 ordering 参数
|
||||
if ordering and isinstance(next(iter(ordering.values()), None), str):
|
||||
# ordering 的值是字符串,只使用键(位置信息)创建新的 OrderedDict
|
||||
# 检查 ordering 中的值类型来决定如何处理:
|
||||
# - 字符串值(从 JSON 反序列化): 只用键创建 ordering_param
|
||||
# - None 值(从第二次往返序列化): 同样只用键创建 ordering_param
|
||||
# - 对象值(已经是实际的 Resource 对象): 直接作为 ordered_items 使用
|
||||
first_val = next(iter(ordering.values()), None) if ordering else None
|
||||
if not ordering or first_val is None or isinstance(first_val, str):
|
||||
# ordering 的值是字符串或 None,只使用键(位置信息)创建新的 OrderedDict
|
||||
# 传递 ordering 参数而不是 ordered_items,让 TipRack 自己创建 Tip 对象
|
||||
items = None
|
||||
# 使用 ordering 参数,只包含位置信息(键)
|
||||
ordering_param = collections.OrderedDict((k, None) for k in ordering.keys())
|
||||
else:
|
||||
# ordering 的值已经是对象,可以直接使用
|
||||
@@ -397,14 +398,15 @@ class PRCXI9300TubeRack(TubeRack):
|
||||
items_to_pass = ordered_items
|
||||
ordering_param = None
|
||||
elif ordering is not None:
|
||||
# 检查 ordering 中的值是否是字符串(从 JSON 反序列化时的情况)
|
||||
# 如果是字符串,说明这是位置名称,需要让 TubeRack 自己创建 Tube 对象
|
||||
# 我们只传递位置信息(键),不传递值,使用 ordering 参数
|
||||
if ordering and isinstance(next(iter(ordering.values()), None), str):
|
||||
# ordering 的值是字符串,只使用键(位置信息)创建新的 OrderedDict
|
||||
# 检查 ordering 中的值类型来决定如何处理:
|
||||
# - 字符串值(从 JSON 反序列化): 只用键创建 ordering_param
|
||||
# - None 值(从第二次往返序列化): 同样只用键创建 ordering_param
|
||||
# - 对象值(已经是实际的 Resource 对象): 直接作为 ordered_items 使用
|
||||
first_val = next(iter(ordering.values()), None) if ordering else None
|
||||
if not ordering or first_val is None or isinstance(first_val, str):
|
||||
# ordering 的值是字符串或 None,只使用键(位置信息)创建新的 OrderedDict
|
||||
# 传递 ordering 参数而不是 ordered_items,让 TubeRack 自己创建 Tube 对象
|
||||
items_to_pass = None
|
||||
# 使用 ordering 参数,只包含位置信息(键)
|
||||
ordering_param = collections.OrderedDict((k, None) for k in ordering.keys())
|
||||
else:
|
||||
# ordering 的值已经是对象,可以直接使用
|
||||
|
||||
@@ -19,10 +19,11 @@ from rclpy.node import Node
|
||||
import re
|
||||
|
||||
class LiquidHandlerJointPublisher(BaseROS2DeviceNode):
|
||||
def __init__(self,resources_config:list, resource_tracker, rate=50, device_id:str = "lh_joint_publisher", **kwargs):
|
||||
def __init__(self,resources_config:list, resource_tracker, rate=50, device_id:str = "lh_joint_publisher", registry_name: str = "lh_joint_publisher", **kwargs):
|
||||
super().__init__(
|
||||
driver_instance=self,
|
||||
device_id=device_id,
|
||||
registry_name=registry_name,
|
||||
status_types={},
|
||||
action_value_mappings={},
|
||||
hardware_interface={},
|
||||
|
||||
@@ -22,7 +22,7 @@ from threading import Lock, RLock
|
||||
from typing_extensions import TypedDict
|
||||
|
||||
from unilabos.ros.nodes.base_device_node import BaseROS2DeviceNode
|
||||
from unilabos.utils.decorator import not_action
|
||||
from unilabos.utils.decorator import not_action, always_free
|
||||
from unilabos.resources.resource_tracker import SampleUUIDsType, LabSample, RETURN_UNILABOS_SAMPLES
|
||||
|
||||
|
||||
@@ -123,8 +123,8 @@ class VirtualWorkbench:
|
||||
_ros_node: BaseROS2DeviceNode
|
||||
|
||||
# 配置常量
|
||||
ARM_OPERATION_TIME: float = 3.0 # 机械臂操作时间(秒)
|
||||
HEATING_TIME: float = 10.0 # 加热时间(秒)
|
||||
ARM_OPERATION_TIME: float = 2 # 机械臂操作时间(秒)
|
||||
HEATING_TIME: float = 60.0 # 加热时间(秒)
|
||||
NUM_HEATING_STATIONS: int = 3 # 加热台数量
|
||||
|
||||
def __init__(self, device_id: Optional[str] = None, config: Optional[Dict[str, Any]] = None, **kwargs):
|
||||
@@ -141,9 +141,9 @@ class VirtualWorkbench:
|
||||
self.data: Dict[str, Any] = {}
|
||||
|
||||
# 从config中获取可配置参数
|
||||
self.ARM_OPERATION_TIME = float(self.config.get("arm_operation_time", 3.0))
|
||||
self.HEATING_TIME = float(self.config.get("heating_time", 10.0))
|
||||
self.NUM_HEATING_STATIONS = int(self.config.get("num_heating_stations", 3))
|
||||
self.ARM_OPERATION_TIME = float(self.config.get("arm_operation_time", self.ARM_OPERATION_TIME))
|
||||
self.HEATING_TIME = float(self.config.get("heating_time", self.HEATING_TIME))
|
||||
self.NUM_HEATING_STATIONS = int(self.config.get("num_heating_stations", self.NUM_HEATING_STATIONS))
|
||||
|
||||
# 机械臂状态和锁 (使用threading.Lock)
|
||||
self._arm_lock = Lock()
|
||||
@@ -431,6 +431,7 @@ class VirtualWorkbench:
|
||||
sample_uuid, content in sample_uuids.items()]
|
||||
}
|
||||
|
||||
@always_free
|
||||
def start_heating(
|
||||
self,
|
||||
sample_uuids: SampleUUIDsType,
|
||||
@@ -501,10 +502,21 @@ class VirtualWorkbench:
|
||||
|
||||
self._update_data_status(f"加热台{station_id}开始加热{material_id}")
|
||||
|
||||
# 模拟加热过程 (10秒)
|
||||
# 打印当前所有正在加热的台位
|
||||
with self._stations_lock:
|
||||
heating_list = [
|
||||
f"加热台{sid}:{s.current_material}"
|
||||
for sid, s in self._heating_stations.items()
|
||||
if s.state == HeatingStationState.HEATING and s.current_material
|
||||
]
|
||||
self.logger.info(f"[并行加热] 当前同时加热中: {', '.join(heating_list)}")
|
||||
|
||||
# 模拟加热过程
|
||||
start_time = time.time()
|
||||
last_countdown_log = start_time
|
||||
while True:
|
||||
elapsed = time.time() - start_time
|
||||
remaining = max(0.0, self.HEATING_TIME - elapsed)
|
||||
progress = min(100.0, (elapsed / self.HEATING_TIME) * 100)
|
||||
|
||||
with self._stations_lock:
|
||||
@@ -512,6 +524,11 @@ class VirtualWorkbench:
|
||||
|
||||
self._update_data_status(f"加热台{station_id}加热中: {progress:.1f}%")
|
||||
|
||||
            # 每5秒打印一次倒计时
            if time.time() - last_countdown_log >= 5.0:
                self.logger.info(f"[加热台{station_id}] {material_id} 剩余 {remaining:.1f}s")
                last_countdown_log = time.time()

            if elapsed >= self.HEATING_TIME:
                break

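The countdown logging above is a rate-limited progress loop. A compact stand-alone sketch of the same pattern, with durations shortened so it finishes quickly (the real loop updates station progress instead of printing):

import time

HEATING_TIME = 3.0   # shortened from the 60 s used above
LOG_INTERVAL = 1.0   # shortened from the 5 s interval used above

start_time = time.time()
last_log = start_time
while True:
    elapsed = time.time() - start_time
    remaining = max(0.0, HEATING_TIME - elapsed)
    if time.time() - last_log >= LOG_INTERVAL:
        print(f"remaining {remaining:.1f}s")  # stands in for self.logger.info(...)
        last_log = time.time()
    if elapsed >= HEATING_TIME:
        break
    time.sleep(0.1)  # avoid a busy-wait in this sketch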
@@ -96,10 +96,13 @@ serial:
|
||||
type: string
|
||||
port:
|
||||
type: string
|
||||
registry_name:
|
||||
type: string
|
||||
resource_tracker:
|
||||
type: object
|
||||
required:
|
||||
- device_id
|
||||
- registry_name
|
||||
- port
|
||||
type: object
|
||||
data:
|
||||
|
||||
@@ -67,6 +67,9 @@ camera:
|
||||
period:
|
||||
default: 0.1
|
||||
type: number
|
||||
registry_name:
|
||||
default: ''
|
||||
type: string
|
||||
resource_tracker:
|
||||
type: object
|
||||
required: []
|
||||
|
||||
@@ -6090,6 +6090,7 @@ virtual_workbench:
|
||||
type: object
|
||||
type: UniLabJsonCommand
|
||||
auto-start_heating:
|
||||
always_free: true
|
||||
feedback: {}
|
||||
goal: {}
|
||||
goal_default:
|
||||
|
||||
@@ -5,6 +5,7 @@ import sys
|
||||
import inspect
|
||||
import importlib
|
||||
import threading
|
||||
import traceback
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Union, Tuple
|
||||
@@ -88,6 +89,14 @@ class Registry:
|
||||
)
|
||||
test_latency_schema["description"] = "用于测试延迟的动作,返回延迟时间和时间差。"
|
||||
|
||||
test_resource_method_info = host_node_enhanced_info.get("action_methods", {}).get("test_resource", {})
|
||||
test_resource_schema = self._generate_unilab_json_command_schema(
|
||||
test_resource_method_info.get("args", []),
|
||||
"test_resource",
|
||||
test_resource_method_info.get("return_annotation"),
|
||||
)
|
||||
test_resource_schema["description"] = "用于测试物料、设备和样本。"
|
||||
|
||||
self.device_type_registry.update(
|
||||
{
|
||||
"host_node": {
|
||||
@@ -189,32 +198,7 @@ class Registry:
|
||||
"goal": {},
|
||||
"feedback": {},
|
||||
"result": {},
|
||||
"schema": {
|
||||
"description": "",
|
||||
"properties": {
|
||||
"feedback": {},
|
||||
"goal": {
|
||||
"properties": {
|
||||
"resource": ros_message_to_json_schema(Resource, "resource"),
|
||||
"resources": {
|
||||
"items": {
|
||||
"properties": ros_message_to_json_schema(
|
||||
Resource, "resources"
|
||||
),
|
||||
"type": "object",
|
||||
},
|
||||
"type": "array",
|
||||
},
|
||||
"device": {"type": "string"},
|
||||
"devices": {"items": {"type": "string"}, "type": "array"},
|
||||
},
|
||||
"type": "object",
|
||||
},
|
||||
"result": {},
|
||||
},
|
||||
"title": "test_resource",
|
||||
"type": "object",
|
||||
},
|
||||
"schema": test_resource_schema,
|
||||
"placeholder_keys": {
|
||||
"device": "unilabos_devices",
|
||||
"devices": "unilabos_devices",
|
||||
@@ -838,6 +822,7 @@ class Registry:
|
||||
("list", "unilabos.registry.placeholder_type:DeviceSlot"),
|
||||
]
|
||||
},
|
||||
**({"always_free": True} if v.get("always_free") else {}),
|
||||
}
|
||||
for k, v in enhanced_info["action_methods"].items()
|
||||
if k not in device_config["class"]["action_value_mappings"]
|
||||
@@ -943,6 +928,7 @@ class Registry:
|
||||
if is_valid:
|
||||
results.append((file, data, device_ids))
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
logger.warning(f"[UniLab Registry] 处理设备文件异常: {file}, 错误: {e}")
|
||||
|
||||
# 线程安全地更新注册表
|
||||
|
||||
@@ -38,24 +38,52 @@ class LabSample(TypedDict):
|
||||
extra: Dict[str, Any]
|
||||
|
||||
|
||||
class ResourceDictPositionSizeType(TypedDict):
|
||||
depth: float
|
||||
width: float
|
||||
height: float
|
||||
|
||||
|
||||
class ResourceDictPositionSize(BaseModel):
|
||||
depth: float = Field(description="Depth", default=0.0) # z
|
||||
width: float = Field(description="Width", default=0.0) # x
|
||||
height: float = Field(description="Height", default=0.0) # y
|
||||
|
||||
|
||||
class ResourceDictPositionScaleType(TypedDict):
|
||||
x: float
|
||||
y: float
|
||||
z: float
|
||||
|
||||
|
||||
class ResourceDictPositionScale(BaseModel):
|
||||
x: float = Field(description="x scale", default=0.0)
|
||||
y: float = Field(description="y scale", default=0.0)
|
||||
z: float = Field(description="z scale", default=0.0)
|
||||
|
||||
|
||||
class ResourceDictPositionObjectType(TypedDict):
|
||||
x: float
|
||||
y: float
|
||||
z: float
|
||||
|
||||
|
||||
class ResourceDictPositionObject(BaseModel):
|
||||
x: float = Field(description="X coordinate", default=0.0)
|
||||
y: float = Field(description="Y coordinate", default=0.0)
|
||||
z: float = Field(description="Z coordinate", default=0.0)
|
||||
|
||||
|
||||
class ResourceDictPositionType(TypedDict):
|
||||
size: ResourceDictPositionSizeType
|
||||
scale: ResourceDictPositionScaleType
|
||||
layout: Literal["2d", "x-y", "z-y", "x-z"]
|
||||
position: ResourceDictPositionObjectType
|
||||
position3d: ResourceDictPositionObjectType
|
||||
rotation: ResourceDictPositionObjectType
|
||||
cross_section_type: Literal["rectangle", "circle", "rounded_rectangle"]
|
||||
|
||||
|
||||
class ResourceDictPosition(BaseModel):
|
||||
size: ResourceDictPositionSize = Field(description="Resource size", default_factory=ResourceDictPositionSize)
|
||||
scale: ResourceDictPositionScale = Field(description="Resource scale", default_factory=ResourceDictPositionScale)
|
||||
@@ -74,6 +102,24 @@ class ResourceDictPosition(BaseModel):
|
||||
)
|
||||
|
||||
|
||||
class ResourceDictType(TypedDict):
|
||||
id: str
|
||||
uuid: str
|
||||
name: str
|
||||
description: str
|
||||
resource_schema: Dict[str, Any]
|
||||
model: Dict[str, Any]
|
||||
icon: str
|
||||
parent_uuid: Optional[str]
|
||||
parent: Optional["ResourceDictType"]
|
||||
type: Union[Literal["device"], str]
|
||||
klass: str
|
||||
pose: ResourceDictPositionType
|
||||
config: Dict[str, Any]
|
||||
data: Dict[str, Any]
|
||||
extra: Dict[str, Any]
|
||||
|
||||
|
||||
# 统一的资源字典模型,parent 自动序列化为 parent_uuid,children 不序列化
|
||||
class ResourceDict(BaseModel):
|
||||
id: str = Field(description="Resource ID")
|
||||
|
||||
@@ -44,8 +44,7 @@ def ros2_device_node(
|
||||
# 从属性中自动发现可发布状态
|
||||
if status_types is None:
|
||||
status_types = {}
|
||||
if device_config is None:
|
||||
raise ValueError("device_config cannot be None")
|
||||
assert device_config is not None, "device_config cannot be None"
|
||||
if action_value_mappings is None:
|
||||
action_value_mappings = {}
|
||||
if hardware_interface is None:
|
||||
|
||||
@@ -146,7 +146,7 @@ def init_wrapper(
|
||||
device_id: str,
|
||||
device_uuid: str,
|
||||
driver_class: type[T],
|
||||
device_config: ResourceTreeInstance,
|
||||
device_config: ResourceDictInstance,
|
||||
status_types: Dict[str, Any],
|
||||
action_value_mappings: Dict[str, Any],
|
||||
hardware_interface: Dict[str, Any],
|
||||
@@ -279,6 +279,7 @@ class BaseROS2DeviceNode(Node, Generic[T]):
|
||||
self,
|
||||
driver_instance: T,
|
||||
device_id: str,
|
||||
registry_name: str,
|
||||
device_uuid: str,
|
||||
status_types: Dict[str, Any],
|
||||
action_value_mappings: Dict[str, Any],
|
||||
@@ -300,6 +301,7 @@ class BaseROS2DeviceNode(Node, Generic[T]):
|
||||
"""
|
||||
self.driver_instance = driver_instance
|
||||
self.device_id = device_id
|
||||
self.registry_name = registry_name
|
||||
self.uuid = device_uuid
|
||||
self.publish_high_frequency = False
|
||||
self.callback_group = ReentrantCallbackGroup()
|
||||
@@ -416,7 +418,9 @@ class BaseROS2DeviceNode(Node, Generic[T]):
|
||||
if len(rts.root_nodes) == 1 and isinstance(rts_plr_instances[0], RegularContainer):
|
||||
# noinspection PyTypeChecker
|
||||
container_instance: RegularContainer = rts_plr_instances[0]
|
||||
found_resources = self.resource_tracker.figure_resource({"name": container_instance.name}, try_mode=True)
|
||||
found_resources = self.resource_tracker.figure_resource(
|
||||
{"name": container_instance.name}, try_mode=True
|
||||
)
|
||||
if not len(found_resources):
|
||||
self.resource_tracker.add_resource(container_instance)
|
||||
logger.info(f"添加物料{container_instance.name}到资源跟踪器")
|
||||
@@ -456,7 +460,7 @@ class BaseROS2DeviceNode(Node, Generic[T]):
|
||||
}
|
||||
res.response = json.dumps(final_response)
|
||||
# 如果driver自己就有assign的方法,那就使用driver自己的assign方法
|
||||
if hasattr(self.driver_instance, "create_resource"):
|
||||
if hasattr(self.driver_instance, "create_resource") and self.node_name != "host_node":
|
||||
create_resource_func = getattr(self.driver_instance, "create_resource")
|
||||
try:
|
||||
ret = create_resource_func(
|
||||
@@ -1152,6 +1156,7 @@ class BaseROS2DeviceNode(Node, Generic[T]):
|
||||
"machine_name": BasicConfig.machine_name,
|
||||
"type": "slave",
|
||||
"edge_device_id": self.device_id,
|
||||
"registry_name": self.registry_name,
|
||||
}
|
||||
},
|
||||
ensure_ascii=False,
|
||||
@@ -1626,9 +1631,7 @@ class BaseROS2DeviceNode(Node, Generic[T]):
|
||||
else:
|
||||
resolved_sample_uuids[sample_uuid] = material_uuid
|
||||
function_args[PARAM_SAMPLE_UUIDS] = resolved_sample_uuids
|
||||
self.lab_logger().debug(
|
||||
f"[JsonCommand] 注入 {PARAM_SAMPLE_UUIDS}: {resolved_sample_uuids}"
|
||||
)
|
||||
self.lab_logger().debug(f"[JsonCommand] 注入 {PARAM_SAMPLE_UUIDS}: {resolved_sample_uuids}")
|
||||
continue
|
||||
|
||||
# 处理单个 ResourceSlot
|
||||
@@ -2005,6 +2008,7 @@ class ROS2DeviceNode:
|
||||
|
||||
if driver_is_ros:
|
||||
driver_params["device_id"] = device_id
|
||||
driver_params["registry_name"] = device_config.res_content.klass
|
||||
driver_params["resource_tracker"] = self.resource_tracker
|
||||
self._driver_instance = self._driver_creator.create_instance(driver_params)
|
||||
if self._driver_instance is None:
|
||||
@@ -2022,6 +2026,7 @@ class ROS2DeviceNode:
|
||||
children=children,
|
||||
driver_instance=self._driver_instance, # type: ignore
|
||||
device_id=device_id,
|
||||
registry_name=device_config.res_content.klass,
|
||||
device_uuid=device_uuid,
|
||||
status_types=status_types,
|
||||
action_value_mappings=action_value_mappings,
|
||||
@@ -2033,6 +2038,7 @@ class ROS2DeviceNode:
|
||||
self._ros_node = BaseROS2DeviceNode(
|
||||
driver_instance=self._driver_instance,
|
||||
device_id=device_id,
|
||||
registry_name=device_config.res_content.klass,
|
||||
device_uuid=device_uuid,
|
||||
status_types=status_types,
|
||||
action_value_mappings=action_value_mappings,
|
||||
@@ -2041,6 +2047,7 @@ class ROS2DeviceNode:
|
||||
resource_tracker=self.resource_tracker,
|
||||
)
|
||||
self._ros_node: BaseROS2DeviceNode
|
||||
# 将注册表类型名传递给BaseROS2DeviceNode,用于slave上报
|
||||
self._ros_node.lab_logger().info(f"初始化完成 {self._ros_node.uuid} {self.driver_is_ros}")
|
||||
self.driver_instance._ros_node = self._ros_node # type: ignore
|
||||
self.driver_instance._execute_driver_command = self._ros_node._execute_driver_command # type: ignore
|
||||
|
||||
@@ -6,12 +6,13 @@ from cv_bridge import CvBridge
|
||||
from unilabos.ros.nodes.base_device_node import BaseROS2DeviceNode, DeviceNodeResourceTracker
|
||||
|
||||
class VideoPublisher(BaseROS2DeviceNode):
|
||||
def __init__(self, device_id='video_publisher', device_uuid='', camera_index=0, period: float = 0.1, resource_tracker: DeviceNodeResourceTracker = None):
|
||||
def __init__(self, device_id='video_publisher', registry_name="", device_uuid='', camera_index=0, period: float = 0.1, resource_tracker: DeviceNodeResourceTracker = None):
|
||||
# 初始化BaseROS2DeviceNode,使用自身作为driver_instance
|
||||
BaseROS2DeviceNode.__init__(
|
||||
self,
|
||||
driver_instance=self,
|
||||
device_id=device_id,
|
||||
registry_name=registry_name,
|
||||
device_uuid=device_uuid,
|
||||
status_types={},
|
||||
action_value_mappings={},
|
||||
|
||||
@@ -10,6 +10,7 @@ class ControllerNode(BaseROS2DeviceNode):
|
||||
def __init__(
|
||||
self,
|
||||
device_id: str,
|
||||
registry_name: str,
|
||||
controller_func: Callable,
|
||||
update_rate: float,
|
||||
inputs: Dict[str, Dict[str, type | str]],
|
||||
@@ -51,6 +52,7 @@ class ControllerNode(BaseROS2DeviceNode):
|
||||
self,
|
||||
driver_instance=self,
|
||||
device_id=device_id,
|
||||
registry_name=registry_name,
|
||||
status_types=status_types,
|
||||
action_value_mappings=action_value_mappings,
|
||||
hardware_interface=hardware_interface,
|
||||
|
||||
@@ -35,7 +35,7 @@ from unilabos.resources.resource_tracker import (
|
||||
ResourceTreeInstance,
|
||||
RETURN_UNILABOS_SAMPLES,
|
||||
JSON_UNILABOS_PARAM,
|
||||
PARAM_SAMPLE_UUIDS,
|
||||
PARAM_SAMPLE_UUIDS, SampleUUIDsType, LabSample,
|
||||
)
|
||||
from unilabos.ros.initialize_device import initialize_device_from_dict
|
||||
from unilabos.ros.msgs.message_converter import (
|
||||
@@ -51,6 +51,7 @@ from unilabos.utils import logger
|
||||
from unilabos.utils.exception import DeviceClassInvalid
|
||||
from unilabos.utils.log import warning
|
||||
from unilabos.utils.type_check import serialize_result_info
|
||||
from unilabos.config.config import BasicConfig
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from unilabos.app.ws_client import QueueItem
|
||||
@@ -63,7 +64,8 @@ class DeviceActionStatus:
|
||||
|
||||
class TestResourceReturn(TypedDict):
|
||||
resources: List[List[ResourceDict]]
|
||||
devices: List[DeviceSlot]
|
||||
devices: List[Dict[str, Any]]
|
||||
unilabos_samples: List[LabSample]
|
||||
|
||||
|
||||
class TestLatencyReturn(TypedDict):
|
||||
@@ -248,6 +250,7 @@ class HostNode(BaseROS2DeviceNode):
|
||||
self,
|
||||
driver_instance=self,
|
||||
device_id=device_id,
|
||||
registry_name="host_node",
|
||||
device_uuid=host_node_dict["uuid"],
|
||||
status_types={},
|
||||
action_value_mappings=lab_registry.device_type_registry["host_node"]["class"]["action_value_mappings"],
|
||||
@@ -302,7 +305,8 @@ class HostNode(BaseROS2DeviceNode):
|
||||
} # 用来存储多个ActionClient实例
|
||||
self._action_value_mappings: Dict[str, Dict] = (
|
||||
{}
|
||||
) # 用来存储多个ActionClient的type, goal, feedback, result的变量名映射关系
|
||||
) # device_id -> action_value_mappings(本地+远程设备统一存储)
|
||||
self._slave_registry_configs: Dict[str, Dict] = {} # registry_name -> registry_config(含action_value_mappings)
|
||||
self._goals: Dict[str, Any] = {} # 用来存储多个目标的状态
|
||||
self._online_devices: Set[str] = {f"{self.namespace}/{device_id}"} # 用于跟踪在线设备
|
||||
self._last_discovery_time = 0.0 # 上次设备发现的时间
|
||||
@@ -636,6 +640,8 @@ class HostNode(BaseROS2DeviceNode):
|
||||
self.device_machine_names[device_id] = "本地"
|
||||
self.devices_instances[device_id] = d
|
||||
# noinspection PyProtectedMember
|
||||
self._action_value_mappings[device_id] = d._ros_node._action_value_mappings
|
||||
# noinspection PyProtectedMember
|
||||
for action_name, action_value_mapping in d._ros_node._action_value_mappings.items():
|
||||
if action_name.startswith("auto-") or str(action_value_mapping.get("type", "")).startswith(
|
||||
"UniLabJsonCommand"
|
||||
@@ -772,6 +778,17 @@ class HostNode(BaseROS2DeviceNode):
|
||||
u = uuid.UUID(item.job_id)
|
||||
device_id = item.device_id
|
||||
action_name = item.action_name
|
||||
|
||||
if BasicConfig.test_mode:
|
||||
action_id = f"/devices/{device_id}/{action_name}"
|
||||
self.lab_logger().info(
|
||||
f"[TEST MODE] 模拟执行: {action_id} (job={item.job_id[:8]}), 参数: {str(action_kwargs)[:500]}"
|
||||
)
|
||||
# 根据注册表 handles 构建模拟返回值
|
||||
mock_return = self._build_test_mode_return(device_id, action_name, action_kwargs)
|
||||
self._handle_test_mode_result(item, action_id, mock_return)
|
||||
return
|
||||
|
||||
if action_type.startswith("UniLabJsonCommand"):
|
||||
if action_name.startswith("auto-"):
|
||||
action_name = action_name[5:]
|
||||
@@ -809,6 +826,51 @@
        )
        future.add_done_callback(lambda f: self.goal_response_callback(item, action_id, f))

    def _build_test_mode_return(
        self, device_id: str, action_name: str, action_kwargs: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        根据注册表 handles 的 output 定义构建测试模式的模拟返回值

        根据 data_key 中 @flatten 的层数决定嵌套数组层数,叶子值为空字典。
        例如: "vessel" → {}, "plate.@flatten" → [{}], "a.@flatten.@flatten" → [[{}]]
        """
        mock_return: Dict[str, Any] = {"test_mode": True, "action_name": action_name}
        action_mappings = self._action_value_mappings.get(device_id, {})
        action_mapping = action_mappings.get(action_name, {})
        handles = action_mapping.get("handles", {})
        if isinstance(handles, dict):
            for output_handle in handles.get("output", []):
                data_key = output_handle.get("data_key", "")
                handler_key = output_handle.get("handler_key", "")
                # 根据 @flatten 层数构建嵌套数组,叶子为空字典
                flatten_count = data_key.count("@flatten")
                value: Any = {}
                for _ in range(flatten_count):
                    value = [value]
                mock_return[handler_key] = value
        return mock_return

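The `@flatten` convention above amounts to wrapping an empty dict in one list layer per occurrence. A stand-alone, runnable restatement (the helper name is illustrative):

from typing import Any

def mock_value_for(data_key: str) -> Any:
    """One nesting level of list per '@flatten' in the data_key, with an empty dict as the leaf."""
    value: Any = {}
    for _ in range(data_key.count("@flatten")):
        value = [value]
    return value

assert mock_value_for("vessel") == {}
assert mock_value_for("plate.@flatten") == [{}]
assert mock_value_for("a.@flatten.@flatten") == [[{}]]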
def _handle_test_mode_result(
|
||||
self, item: "QueueItem", action_id: str, mock_return: Dict[str, Any]
|
||||
) -> None:
|
||||
"""
|
||||
测试模式下直接构建结果并走正常的结果回调流程(跳过 ROS)
|
||||
"""
|
||||
job_id = item.job_id
|
||||
status = "success"
|
||||
return_info = serialize_result_info("", True, mock_return)
|
||||
|
||||
self.lab_logger().info(f"[TEST MODE] Result for {action_id} ({job_id[:8]}): {status}")
|
||||
|
||||
from unilabos.app.web.controller import store_job_result
|
||||
store_job_result(job_id, status, return_info, mock_return)
|
||||
|
||||
# 发布状态到桥接器
|
||||
for bridge in self.bridges:
|
||||
if hasattr(bridge, "publish_job_status"):
|
||||
bridge.publish_job_status(mock_return, item, status, return_info)
|
||||
|
||||
def goal_response_callback(self, item: "QueueItem", action_id: str, future) -> None:
|
||||
"""目标响应回调"""
|
||||
goal_handle = future.result()
|
||||
@@ -1168,6 +1230,10 @@ class HostNode(BaseROS2DeviceNode):
|
||||
def _node_info_update_callback(self, request, response):
|
||||
"""
|
||||
更新节点信息回调
|
||||
|
||||
处理两种消息:
|
||||
1. 首次上报(main_slave_run): 带 devices_config + registry_config,存储 action_value_mappings
|
||||
2. 设备重注册(SYNC_SLAVE_NODE_INFO): 带 edge_device_id + registry_name,用 registry_name 索引已存储的 mappings
|
||||
"""
|
||||
self.lab_logger().trace(f"[Host Node] Node info update request received: {request}")
|
||||
try:
|
||||
@@ -1179,12 +1245,48 @@ class HostNode(BaseROS2DeviceNode):
|
||||
info = info["SYNC_SLAVE_NODE_INFO"]
|
||||
machine_name = info["machine_name"]
|
||||
edge_device_id = info["edge_device_id"]
|
||||
registry_name = info.get("registry_name", "")
|
||||
self.device_machine_names[edge_device_id] = machine_name
|
||||
|
||||
# 用 registry_name 索引已存储的 registry_config,获取 action_value_mappings
|
||||
if registry_name and registry_name in self._slave_registry_configs:
|
||||
action_mappings = self._slave_registry_configs[registry_name].get(
|
||||
"class", {}
|
||||
).get("action_value_mappings", {})
|
||||
if action_mappings:
|
||||
self._action_value_mappings[edge_device_id] = action_mappings
|
||||
self.lab_logger().info(
|
||||
f"[Host Node] Loaded {len(action_mappings)} action mappings "
|
||||
f"for remote device {edge_device_id} (registry: {registry_name})"
|
||||
)
|
||||
else:
|
||||
devices_config = info.pop("devices_config")
|
||||
registry_config = info.pop("registry_config")
|
||||
if registry_config:
|
||||
http_client.resource_registry({"resources": registry_config})
|
||||
|
||||
# 存储 slave 的 registry_config,用于后续 SYNC_SLAVE_NODE_INFO 索引
|
||||
for reg_name, reg_data in registry_config.items():
|
||||
if isinstance(reg_data, dict) and "class" in reg_data:
|
||||
self._slave_registry_configs[reg_name] = reg_data
|
||||
|
||||
# 解析 devices_config,建立 device_id -> action_value_mappings 映射
|
||||
if devices_config:
|
||||
for device_tree in devices_config:
|
||||
for device_dict in device_tree:
|
||||
device_id = device_dict.get("id", "")
|
||||
class_name = device_dict.get("class", "")
|
||||
if device_id and class_name and class_name in self._slave_registry_configs:
|
||||
action_mappings = self._slave_registry_configs[class_name].get(
|
||||
"class", {}
|
||||
).get("action_value_mappings", {})
|
||||
if action_mappings:
|
||||
self._action_value_mappings[device_id] = action_mappings
|
||||
self.lab_logger().info(
|
||||
f"[Host Node] Stored {len(action_mappings)} action mappings "
|
||||
f"for remote device {device_id} (class: {class_name})"
|
||||
)
|
||||
|
||||
self.lab_logger().debug(f"[Host Node] Node info update: {info}")
|
||||
response.response = "OK"
|
||||
except Exception as e:
|
||||
@@ -1481,6 +1583,7 @@ class HostNode(BaseROS2DeviceNode):
|
||||
|
||||
def test_resource(
|
||||
self,
|
||||
sample_uuids: SampleUUIDsType,
|
||||
resource: ResourceSlot = None,
|
||||
resources: List[ResourceSlot] = None,
|
||||
device: DeviceSlot = None,
|
||||
@@ -1495,6 +1598,7 @@ class HostNode(BaseROS2DeviceNode):
|
||||
return {
|
||||
"resources": ResourceTreeSet.from_plr_resources([resource, *resources], known_newly_created=True).dump(),
|
||||
"devices": [device, *devices],
|
||||
"unilabos_samples": [LabSample(sample_uuid=sample_uuid, oss_path="", extra={"material_uuid": content} if isinstance(content, str) else content.serialize()) for sample_uuid, content in sample_uuids.items()]
|
||||
}
|
||||
|
||||
def handle_pong_response(self, pong_data: dict):
|
||||
|
||||
@@ -7,10 +7,11 @@ from rclpy.callback_groups import ReentrantCallbackGroup
|
||||
from unilabos.ros.nodes.base_device_node import BaseROS2DeviceNode
|
||||
|
||||
class JointRepublisher(BaseROS2DeviceNode):
|
||||
def __init__(self,device_id,resource_tracker, **kwargs):
|
||||
def __init__(self,device_id, registry_name, resource_tracker, **kwargs):
|
||||
super().__init__(
|
||||
driver_instance=self,
|
||||
device_id=device_id,
|
||||
registry_name=registry_name,
|
||||
status_types={},
|
||||
action_value_mappings={},
|
||||
hardware_interface={},
|
||||
|
||||
@@ -26,7 +26,7 @@ from unilabos.resources.graphio import initialize_resources
|
||||
from unilabos.registry.registry import lab_registry
|
||||
|
||||
class ResourceMeshManager(BaseROS2DeviceNode):
|
||||
def __init__(self, resource_model: dict, resource_config: list,resource_tracker, device_id: str = "resource_mesh_manager", rate=50, **kwargs):
|
||||
def __init__(self, resource_model: dict, resource_config: list,resource_tracker, device_id: str = "resource_mesh_manager", registry_name: str = "", rate=50, **kwargs):
|
||||
"""初始化资源网格管理器节点
|
||||
|
||||
Args:
|
||||
@@ -37,6 +37,7 @@ class ResourceMeshManager(BaseROS2DeviceNode):
|
||||
super().__init__(
|
||||
driver_instance=self,
|
||||
device_id=device_id,
|
||||
registry_name=registry_name,
|
||||
status_types={},
|
||||
action_value_mappings={},
|
||||
hardware_interface={},
|
||||
|
||||
@@ -7,7 +7,7 @@ from unilabos.ros.nodes.base_device_node import BaseROS2DeviceNode, DeviceNodeRe
|
||||
|
||||
|
||||
class ROS2SerialNode(BaseROS2DeviceNode):
|
||||
def __init__(self, device_id, port: str, baudrate: int = 9600, resource_tracker: DeviceNodeResourceTracker=None):
|
||||
def __init__(self, device_id, registry_name, port: str, baudrate: int = 9600, resource_tracker: DeviceNodeResourceTracker=None):
|
||||
# 保存属性,以便在调用父类初始化前使用
|
||||
self.port = port
|
||||
self.baudrate = baudrate
|
||||
@@ -28,6 +28,7 @@ class ROS2SerialNode(BaseROS2DeviceNode):
|
||||
BaseROS2DeviceNode.__init__(
|
||||
self,
|
||||
driver_instance=self,
|
||||
registry_name=registry_name,
|
||||
device_id=device_id,
|
||||
status_types={},
|
||||
action_value_mappings={},
|
||||
|
||||
@@ -47,6 +47,7 @@ class ROS2WorkstationNode(BaseROS2DeviceNode):
|
||||
*,
|
||||
driver_instance: "WorkstationBase",
|
||||
device_id: str,
|
||||
registry_name: str,
|
||||
device_uuid: str,
|
||||
status_types: Dict[str, Any],
|
||||
action_value_mappings: Dict[str, Any],
|
||||
@@ -62,6 +63,7 @@ class ROS2WorkstationNode(BaseROS2DeviceNode):
|
||||
super().__init__(
|
||||
driver_instance=driver_instance,
|
||||
device_id=device_id,
|
||||
registry_name=registry_name,
|
||||
device_uuid=device_uuid,
|
||||
status_types=status_types,
|
||||
action_value_mappings={**action_value_mappings, **self.protocol_action_mappings},
|
||||
|
||||
@@ -339,13 +339,8 @@
|
||||
"z": 0
|
||||
},
|
||||
"config": {
|
||||
"max_volume": 500.0,
|
||||
"type": "RegularContainer",
|
||||
"category": "container",
|
||||
"max_temp": 200.0,
|
||||
"min_temp": -20.0,
|
||||
"has_stirrer": true,
|
||||
"has_heater": true
|
||||
"category": "container"
|
||||
},
|
||||
"data": {
|
||||
"liquids": [],
|
||||
@@ -769,9 +764,7 @@
|
||||
"size_y": 250,
|
||||
"size_z": 0,
|
||||
"type": "RegularContainer",
|
||||
"category": "container",
|
||||
"reagent": "sodium_chloride",
|
||||
"physical_state": "solid"
|
||||
"category": "container"
|
||||
},
|
||||
"data": {
|
||||
"current_mass": 500.0,
|
||||
@@ -792,14 +785,11 @@
|
||||
"z": 0
|
||||
},
|
||||
"config": {
|
||||
"volume": 500.0,
|
||||
"size_x": 600,
|
||||
"size_y": 250,
|
||||
"size_z": 0,
|
||||
"type": "RegularContainer",
|
||||
"category": "container",
|
||||
"reagent": "sodium_carbonate",
|
||||
"physical_state": "solid"
|
||||
"category": "container"
|
||||
},
|
||||
"data": {
|
||||
"current_mass": 500.0,
|
||||
@@ -820,14 +810,11 @@
|
||||
"z": 0
|
||||
},
|
||||
"config": {
|
||||
"volume": 500.0,
|
||||
"size_x": 650,
|
||||
"size_y": 250,
|
||||
"size_z": 0,
|
||||
"type": "RegularContainer",
|
||||
"category": "container",
|
||||
"reagent": "magnesium_chloride",
|
||||
"physical_state": "solid"
|
||||
"category": "container"
|
||||
},
|
||||
"data": {
|
||||
"current_mass": 500.0,
|
||||
|
||||
@@ -184,6 +184,51 @@ def get_all_subscriptions(instance) -> list:
|
||||
return subscriptions
|
||||
|
||||
|
||||
def always_free(func: F) -> F:
    """
    标记动作为永久闲置(不受busy队列限制)的装饰器

    被此装饰器标记的 action 方法,在执行时不会受到设备级别的排队限制,
    任何时候请求都可以立即执行。适用于查询类、状态读取类等轻量级操作。

    Example:
        class MyDriver:
            @always_free
            def query_status(self, param: str):
                # 这个动作可以随时执行,不需要排队
                return self._status

            def transfer(self, volume: float):
                # 这个动作会按正常排队逻辑执行
                pass

    Note:
        - 可以与其他装饰器组合使用,@always_free 应放在最外层
        - 仅影响 WebSocket 调度层的 busy/free 判断,不影响 ROS2 层
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)

    wrapper._is_always_free = True  # type: ignore[attr-defined]

    return wrapper  # type: ignore[return-value]


def is_always_free(func) -> bool:
    """
    检查函数是否被标记为永久闲置

    Args:
        func: 被检查的函数

    Returns:
        如果函数被 @always_free 装饰则返回 True,否则返回 False
    """
    return getattr(func, "_is_always_free", False)

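A self-contained usage check for the two helpers above, assuming `always_free` and `is_always_free` are in scope (for example imported from unilabos.utils.decorator):

class DemoDriver:
    @always_free
    def query_status(self) -> str:
        # Exempt from per-device queueing.
        return "idle"

    def transfer(self, volume: float) -> None:
        # Normal queued action.
        pass

assert is_always_free(DemoDriver.query_status) is True
assert is_always_free(DemoDriver.transfer) is False
assert DemoDriver().query_status() == "idle"  # the wrapper still calls through to the original method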
def not_action(func: F) -> F:
|
||||
"""
|
||||
标记方法为非动作的装饰器
|
||||
|
||||
@@ -29,7 +29,7 @@ from ast import Constant
|
||||
|
||||
from unilabos.resources.resource_tracker import PARAM_SAMPLE_UUIDS
|
||||
from unilabos.utils import logger
|
||||
from unilabos.utils.decorator import is_not_action
|
||||
from unilabos.utils.decorator import is_not_action, is_always_free
|
||||
|
||||
|
||||
class ImportManager:
|
||||
@@ -282,6 +282,9 @@ class ImportManager:
|
||||
continue
|
||||
# 其他非_开头的方法归类为action
|
||||
method_info = self._analyze_method_signature(method)
|
||||
# 检查是否被 @always_free 装饰器标记
|
||||
if is_always_free(method):
|
||||
method_info["always_free"] = True
|
||||
result["action_methods"][name] = method_info
|
||||
|
||||
return result
|
||||
@@ -339,6 +342,9 @@ class ImportManager:
|
||||
if self._is_not_action_method(node):
|
||||
continue
|
||||
# 其他非_开头的方法归类为action
|
||||
# 检查是否被 @always_free 装饰器标记
|
||||
if self._is_always_free_method(node):
|
||||
method_info["always_free"] = True
|
||||
result["action_methods"][method_name] = method_info
|
||||
return result
|
||||
|
||||
@@ -474,6 +480,13 @@ class ImportManager:
|
||||
return True
|
||||
return False
|
||||
|
||||
    def _is_always_free_method(self, node: ast.FunctionDef) -> bool:
        """检查是否是@always_free装饰的方法"""
        for decorator in node.decorator_list:
            if isinstance(decorator, ast.Name) and decorator.id == "always_free":
                return True
        return False

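The AST check above recognizes the bare `@always_free` name on parsed source without importing it (an `@module.always_free` attribute form would not match, same as in the method above). A stand-alone illustration outside the ImportManager:

import ast

source = '''
class Workbench:
    @always_free
    def start_heating(self, station_id: int):
        ...

    def move_arm(self):
        ...
'''

tree = ast.parse(source)
for node in ast.walk(tree):
    if isinstance(node, ast.FunctionDef):
        # Only a bare ast.Name decorator called "always_free" counts, mirroring _is_always_free_method.
        marked = any(
            isinstance(dec, ast.Name) and dec.id == "always_free"
            for dec in node.decorator_list
        )
        print(node.name, marked)
# start_heating True
# move_arm False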
def _get_property_name_from_setter(self, node: ast.FunctionDef) -> str:
|
||||
"""从setter装饰器中获取属性名"""
|
||||
for decorator in node.decorator_list:
|
||||
|
||||
@@ -193,6 +193,7 @@ def configure_logger(loglevel=None, working_dir=None):
|
||||
root_logger.addHandler(console_handler)
|
||||
|
||||
# 如果指定了工作目录,添加文件处理器
|
||||
log_filepath = None
|
||||
if working_dir is not None:
|
||||
logs_dir = os.path.join(working_dir, "logs")
|
||||
os.makedirs(logs_dir, exist_ok=True)
|
||||
@@ -213,6 +214,7 @@ def configure_logger(loglevel=None, working_dir=None):
|
||||
|
||||
logging.getLogger("asyncio").setLevel(logging.INFO)
|
||||
logging.getLogger("urllib3").setLevel(logging.INFO)
|
||||
return log_filepath
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -362,14 +362,16 @@ def build_protocol_graph(
|
||||
protocol_steps: List[Dict[str, Any]],
|
||||
workstation_name: str,
|
||||
action_resource_mapping: Optional[Dict[str, str]] = None,
|
||||
labware_defs: Optional[List[Dict[str, Any]]] = None,
|
||||
) -> WorkflowGraph:
|
||||
"""统一的协议图构建函数,根据设备类型自动选择构建逻辑
|
||||
|
||||
Args:
|
||||
labware_info: labware 信息字典,格式为 {name: {slot, well, labware, ...}, ...}
|
||||
labware_info: reagent 信息字典,格式为 {name: {slot, well}, ...},用于 set_liquid 和 well 查找
|
||||
protocol_steps: 协议步骤列表
|
||||
workstation_name: 工作站名称
|
||||
action_resource_mapping: action 到 resource_name 的映射字典,可选
|
||||
labware_defs: labware 定义列表,格式为 [{"name": "...", "slot": "1", "type": "lab_xxx"}, ...]
|
||||
"""
|
||||
G = WorkflowGraph()
|
||||
resource_last_writer = {} # reagent_name -> "node_id:port"
|
||||
@@ -377,18 +379,7 @@ def build_protocol_graph(
|
||||
|
||||
protocol_steps = refactor_data(protocol_steps, action_resource_mapping)
|
||||
|
||||
# ==================== 第一步:按 slot 去重创建 create_resource 节点 ====================
|
||||
# 收集所有唯一的 slot
|
||||
slots_info = {} # slot -> {labware, res_id}
|
||||
for labware_id, item in labware_info.items():
|
||||
slot = str(item.get("slot", ""))
|
||||
if slot and slot not in slots_info:
|
||||
res_id = f"plate_slot_{slot}"
|
||||
slots_info[slot] = {
|
||||
"labware": item.get("labware", ""),
|
||||
"res_id": res_id,
|
||||
}
|
||||
|
||||
# ==================== 第一步:按 slot 创建 create_resource 节点 ====================
|
||||
# 创建 Group 节点,包含所有 create_resource 节点
|
||||
group_node_id = str(uuid.uuid4())
|
||||
G.add_node(
|
||||
@@ -404,29 +395,35 @@ def build_protocol_graph(
|
||||
param=None,
|
||||
)
|
||||
|
||||
# 为每个唯一的 slot 创建 create_resource 节点
|
||||
# 直接使用 JSON 中的 labware 定义,每个 slot 一条记录,type 即 class_name
|
||||
res_index = 0
|
||||
for slot, info in slots_info.items():
|
||||
node_id = str(uuid.uuid4())
|
||||
res_id = info["res_id"]
|
||||
for lw in (labware_defs or []):
|
||||
slot = str(lw.get("slot", ""))
|
||||
if not slot or slot in slot_to_create_resource:
|
||||
continue # 跳过空 slot 或已处理的 slot
|
||||
|
||||
lw_name = lw.get("name", f"slot {slot}")
|
||||
lw_type = lw.get("type", CREATE_RESOURCE_DEFAULTS["class_name"])
|
||||
res_id = f"plate_slot_{slot}"
|
||||
|
||||
res_index += 1
|
||||
node_id = str(uuid.uuid4())
|
||||
G.add_node(
|
||||
node_id,
|
||||
template_name="create_resource",
|
||||
resource_name="host_node",
|
||||
name=f"Plate {res_index}",
|
||||
description=f"Create plate on slot {slot}",
|
||||
name=lw_name,
|
||||
description=f"Create {lw_name}",
|
||||
lab_node_type="Labware",
|
||||
footer="create_resource-host_node",
|
||||
device_name=DEVICE_NAME_HOST,
|
||||
type=NODE_TYPE_DEFAULT,
|
||||
parent_uuid=group_node_id, # 指向 Group 节点
|
||||
minimized=True, # 折叠显示
|
||||
parent_uuid=group_node_id,
|
||||
minimized=True,
|
||||
param={
|
||||
"res_id": res_id,
|
||||
"device_id": CREATE_RESOURCE_DEFAULTS["device_id"],
|
||||
"class_name": CREATE_RESOURCE_DEFAULTS["class_name"],
|
||||
"class_name": lw_type,
|
||||
"parent": CREATE_RESOURCE_DEFAULTS["parent_template"].format(slot=slot),
|
||||
"bind_locations": {"x": 0.0, "y": 0.0, "z": 0.0},
|
||||
"slot_on_deck": slot,
|
||||
@@ -434,8 +431,6 @@ def build_protocol_graph(
|
||||
)
|
||||
slot_to_create_resource[slot] = node_id
|
||||
|
||||
# create_resource 之间不需要 ready 连接
|
||||
|
||||
# ==================== 第二步:为每个 reagent 创建 set_liquid_from_plate 节点 ====================
|
||||
# 创建 Group 节点,包含所有 set_liquid_from_plate 节点
|
||||
set_liquid_group_id = str(uuid.uuid4())
|
||||
|
||||
@@ -1,16 +1,20 @@
"""
JSON 工作流转换模块

-将 workflow/reagent 格式的 JSON 转换为统一工作流格式。
+将 workflow/reagent/labware 格式的 JSON 转换为统一工作流格式。

输入格式:
{
    "labware": [
        {"name": "...", "slot": "1", "type": "lab_xxx"},
        ...
    ],
    "workflow": [
        {"action": "...", "action_args": {...}},
        ...
    ],
    "reagent": {
-       "reagent_name": {"slot": int, "well": [...], "labware": "..."},
+       "reagent_name": {"slot": int, "well": [...]},
        ...
    }
}
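An illustrative input document in the labware/workflow/reagent shape described above; every name, slot, type and action below is made up:

example_input = {
    "labware": [
        {"name": "sample plate", "slot": "1", "type": "lab_corning_96_wellplate"},
        {"name": "tip rack", "slot": "2", "type": "lab_opentrons_96_tiprack"},
    ],
    "workflow": [
        {"action": "transfer", "action_args": {"volume": 50, "source": "buffer", "target": "A1"}},
        {"action": "incubate", "action_args": {"minutes": 10}},
    ],
    "reagent": {
        "buffer": {"slot": 1, "well": ["A1", "A2"]},
    },
}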
@@ -245,18 +249,18 @@ def convert_from_json(
|
||||
if "workflow" not in json_data or "reagent" not in json_data:
|
||||
raise ValueError(
|
||||
"不支持的 JSON 格式。请使用标准格式:\n"
|
||||
'{"workflow": [{"action": "...", "action_args": {...}}, ...], '
|
||||
'"reagent": {"name": {"slot": int, "well": [...], "labware": "..."}, ...}}'
|
||||
'{"labware": [...], "workflow": [...], "reagent": {...}}'
|
||||
)
|
||||
|
||||
# 提取数据
|
||||
workflow = json_data["workflow"]
|
||||
reagent = json_data["reagent"]
|
||||
labware_defs = json_data.get("labware", []) # 新的 labware 定义列表
|
||||
|
||||
# 规范化步骤数据
|
||||
protocol_steps = normalize_workflow_steps(workflow)
|
||||
|
||||
# reagent 已经是字典格式,直接使用
|
||||
# reagent 已经是字典格式,用于 set_liquid 和 well 数量查找
|
||||
labware_info = reagent
|
||||
|
||||
# 构建工作流图
|
||||
@@ -265,6 +269,7 @@ def convert_from_json(
|
||||
protocol_steps=protocol_steps,
|
||||
workstation_name=workstation_name,
|
||||
action_resource_mapping=ACTION_RESOURCE_MAPPING,
|
||||
labware_defs=labware_defs,
|
||||
)
|
||||
|
||||
# 校验句柄配置
|
||||
|
||||
@@ -41,6 +41,7 @@ def upload_workflow(
|
||||
workflow_name: Optional[str] = None,
|
||||
tags: Optional[List[str]] = None,
|
||||
published: bool = False,
|
||||
description: str = "",
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
上传工作流到服务器
|
||||
@@ -56,6 +57,7 @@ def upload_workflow(
|
||||
workflow_name: 工作流名称,如果不提供则从文件中读取或使用文件名
|
||||
tags: 工作流标签列表,默认为空列表
|
||||
published: 是否发布工作流,默认为False
|
||||
description: 工作流描述,发布时使用
|
||||
|
||||
Returns:
|
||||
Dict: API响应数据
|
||||
@@ -75,6 +77,14 @@ def upload_workflow(
|
||||
print_status(f"工作流文件JSON解析失败: {e}", "error")
|
||||
return {"code": -1, "message": f"JSON解析失败: {e}"}
|
||||
|
||||
# 从 JSON 文件中提取 description 和 tags(作为 fallback)
|
||||
if not description and "description" in workflow_data:
|
||||
description = workflow_data["description"]
|
||||
print_status(f"从文件中读取 description", "info")
|
||||
if not tags and "tags" in workflow_data:
|
||||
tags = workflow_data["tags"]
|
||||
print_status(f"从文件中读取 tags: {tags}", "info")
|
||||
|
||||
# 自动检测并转换格式
|
||||
if not _is_node_link_format(workflow_data):
|
||||
try:
|
||||
@@ -96,6 +106,7 @@ def upload_workflow(
|
||||
print_status(f" - 节点数量: {len(nodes)}", "info")
|
||||
print_status(f" - 边数量: {len(edges)}", "info")
|
||||
print_status(f" - 标签: {tags or []}", "info")
|
||||
print_status(f" - 描述: {description[:50]}{'...' if len(description) > 50 else ''}", "info")
|
||||
print_status(f" - 发布状态: {published}", "info")
|
||||
|
||||
# 调用 http_client 上传
|
||||
@@ -107,6 +118,7 @@ def upload_workflow(
|
||||
edges=edges,
|
||||
tags=tags,
|
||||
published=published,
|
||||
description=description,
|
||||
)
|
||||
|
||||
if result.get("code") == 0:
|
||||
@@ -131,8 +143,9 @@ def handle_workflow_upload_command(args_dict: Dict[str, Any]) -> None:
|
||||
workflow_name = args_dict.get("workflow_name")
|
||||
tags = args_dict.get("tags", [])
|
||||
published = args_dict.get("published", False)
|
||||
description = args_dict.get("description", "")
|
||||
|
||||
if workflow_file:
|
||||
upload_workflow(workflow_file, workflow_name, tags, published)
|
||||
upload_workflow(workflow_file, workflow_name, tags, published, description)
|
||||
else:
|
||||
print_status("未指定工作流文件路径,请使用 -f/--workflow_file 参数", "error")
|
||||
|
||||