diff --git a/unilabos/app/ws_client.py b/unilabos/app/ws_client.py index 4c87d36..95526f0 100644 --- a/unilabos/app/ws_client.py +++ b/unilabos/app/ws_client.py @@ -848,7 +848,7 @@ class MessageProcessor: device_action_groups[key_add].append(item["uuid"]) logger.info( - f"[MessageProcessor] Resource migrated: {item['uuid'][:8]} from {device_old_id} to {device_id}" + f"[资源同步] 跨站Transfer: {item['uuid'][:8]} from {device_old_id} to {device_id}" ) else: # 正常update @@ -863,11 +863,11 @@ class MessageProcessor: device_action_groups[key] = [] device_action_groups[key].append(item["uuid"]) - logger.info(f"触发物料更新 {action} 分组数量: {len(device_action_groups)}, 总数量: {len(resource_uuid_list)}") + logger.trace(f"[资源同步] 动作 {action} 分组数量: {len(device_action_groups)}, 总数量: {len(resource_uuid_list)}") # 为每个(device_id, action)创建独立的更新线程 for (device_id, actual_action), items in device_action_groups.items(): - logger.info(f"设备 {device_id} 物料更新 {actual_action} 数量: {len(items)}") + logger.trace(f"[资源同步] {device_id} 物料动作 {actual_action} 数量: {len(items)}") def _notify_resource_tree(dev_id, act, item_list): try: @@ -902,28 +902,28 @@ class MessageProcessor: async def _handle_request_restart(self, data: Dict[str, Any]): """ 处理重启请求 - + 当LabGo发送request_restart时,执行清理并触发重启 """ reason = data.get("reason", "unknown") delay = data.get("delay", 2) # 默认延迟2秒 logger.info(f"[MessageProcessor] Received restart request, reason: {reason}, delay: {delay}s") - + # 发送确认消息 if self.websocket_client: await self.websocket_client.send_message({ "action": "restart_acknowledged", "data": {"reason": reason, "delay": delay} }) - + # 设置全局重启标志 import unilabos.app.main as main_module main_module._restart_requested = True main_module._restart_reason = reason - + # 延迟后执行清理 await asyncio.sleep(delay) - + # 在新线程中执行清理,避免阻塞当前事件循环 def do_cleanup(): import time @@ -937,7 +937,7 @@ class MessageProcessor: logger.error("[MessageProcessor] Cleanup failed") except Exception as e: logger.error(f"[MessageProcessor] Error during cleanup: {e}") - + cleanup_thread = threading.Thread(target=do_cleanup, name="RestartCleanupThread", daemon=True) cleanup_thread.start() logger.info(f"[MessageProcessor] Restart cleanup scheduled") @@ -1375,7 +1375,7 @@ class WebSocketClient(BaseCommunicationClient): # 收集设备信息 devices = [] machine_name = BasicConfig.machine_name - + try: host_node = HostNode.get_instance(0) if host_node: @@ -1383,7 +1383,7 @@ class WebSocketClient(BaseCommunicationClient): for device_id, namespace in host_node.devices_names.items(): device_key = f"{namespace}/{device_id}" if namespace.startswith("/") else f"/{namespace}/{device_id}" is_online = device_key in host_node._online_devices - + # 获取设备的动作信息 actions = {} for action_id, client in host_node._action_clients.items(): @@ -1394,7 +1394,7 @@ class WebSocketClient(BaseCommunicationClient): "action_path": action_id, "action_type": str(type(client).__name__), } - + devices.append({ "device_id": device_id, "namespace": namespace, @@ -1403,7 +1403,7 @@ class WebSocketClient(BaseCommunicationClient): "machine_name": host_node.device_machine_names.get(device_id, machine_name), "actions": actions, }) - + logger.info(f"[WebSocketClient] Collected {len(devices)} devices for host_ready") except Exception as e: logger.warning(f"[WebSocketClient] Error collecting device info: {e}") diff --git a/unilabos/device_comms/opcua_client/client.py b/unilabos/device_comms/opcua_client/client.py index b45cded..27a401f 100644 --- a/unilabos/device_comms/opcua_client/client.py +++ b/unilabos/device_comms/opcua_client/client.py @@ 
-141,7 +141,7 @@ class BaseClient(UniversalDriver): try: self.client.connect() logger.info('client connected!') - + # 连接后开始查找节点 if self._variables_to_find: self._find_nodes() @@ -150,36 +150,36 @@ class BaseClient(UniversalDriver): raise else: raise ValueError('client is not initialized') - + def _find_nodes(self) -> None: """查找服务器中的节点""" if not self.client: raise ValueError('client is not connected') - + logger.info(f'开始查找 {len(self._variables_to_find)} 个节点...') try: # 获取根节点 root = self.client.get_root_node() objects = root.get_child(["0:Objects"]) - + # 记录查找前的状态 before_count = len(self._node_registry) - + # 查找节点 self._find_nodes_recursive(objects) - + # 记录查找后的状态 after_count = len(self._node_registry) newly_found = after_count - before_count - + logger.info(f"本次查找新增 {newly_found} 个节点,当前共 {after_count} 个") - + # 检查是否所有节点都已找到 not_found = [] for var_name, var_info in self._variables_to_find.items(): if var_name not in self._node_registry: not_found.append(var_name) - + if not_found: logger.warning(f"⚠ 以下 {len(not_found)} 个节点未找到: {', '.join(not_found[:10])}{'...' if len(not_found) > 10 else ''}") logger.warning(f"提示:请检查这些节点名称是否与服务器的 BrowseName 完全匹配(包括大小写、空格等)") @@ -188,7 +188,7 @@ class BaseClient(UniversalDriver): logger.info(f"尝试在服务器中查找第一个未找到的节点 '{not_found[0]}' 的相似节点...") else: logger.info(f"✓ 所有 {len(self._variables_to_find)} 个节点均已找到并注册") - + except Exception as e: logger.error(f"查找节点失败: {e}") traceback.print_exc() @@ -199,14 +199,14 @@ class BaseClient(UniversalDriver): # 获取当前节点的浏览名称 browse_name = node.get_browse_name() node_name = browse_name.Name - + # 检查是否是我们要找的变量 if node_name in self._variables_to_find and node_name not in self._node_registry: var_info = self._variables_to_find[node_name] node_type = var_info.get("node_type") data_type = var_info.get("data_type") node_id_str = str(node.nodeid) - + # 根据节点类型创建相应的对象 if node_type == NodeType.VARIABLE: self._node_registry[node_name] = Variable(self.client, node_name, node_id_str, data_type) @@ -219,11 +219,11 @@ class BaseClient(UniversalDriver): parent_node_id = str(parent_node.nodeid) self._node_registry[node_name] = Method(self.client, node_name, node_id_str, parent_node_id, data_type) logger.info(f"✓ 找到方法节点: '{node_name}', NodeId: {node_id_str}, ParentId: {parent_node_id}") - + # 递归处理子节点 for child in node.get_children(): self._find_nodes_recursive(child) - + except Exception as e: # 忽略处理单个节点时的错误,继续处理其他节点 pass @@ -238,50 +238,50 @@ class BaseClient(UniversalDriver): df = pd.read_csv(file_path) df = df.drop_duplicates(subset='Name', keep='first') # 重复的数据应该报错 nodes = [] - + # 检查是否包含英文名称列和节点语言列 has_english_name = 'EnglishName' in df.columns has_node_language = 'NodeLanguage' in df.columns - + # 如果存在英文名称列,创建名称映射字典 name_mapping = {} reverse_mapping = {} - + for _, row in df.iterrows(): name = row.get('Name') node_type_str = row.get('NodeType') data_type_str = row.get('DataType') - + # 获取英文名称和节点语言(如果有) english_name = row.get('EnglishName') if has_english_name else None node_language = row.get('NodeLanguage') if has_node_language else 'English' # 默认为英文 - + # 如果有英文名称,添加到映射字典 if english_name and not pd.isna(english_name) and node_language == 'Chinese': name_mapping[english_name] = name reverse_mapping[name] = english_name - + if not name or not node_type_str: logger.warning(f"跳过无效行: 名称或节点类型缺失") continue - + # 只支持VARIABLE和METHOD两种类型 if node_type_str not in ['VARIABLE', 'METHOD']: logger.warning(f"不支持的节点类型: {node_type_str},仅支持VARIABLE和METHOD") continue - + try: node_type = NodeType[node_type_str] except KeyError: logger.warning(f"无效的节点类型: 
{node_type_str}") continue - + # 对于VARIABLE节点,必须指定数据类型 if node_type == NodeType.VARIABLE: if not data_type_str or pd.isna(data_type_str): logger.warning(f"变量节点 {name} 必须指定数据类型") continue - + try: data_type = DataType[data_type_str] except KeyError: @@ -295,14 +295,14 @@ class BaseClient(UniversalDriver): data_type = DataType[data_type_str] except KeyError: logger.warning(f"无效的数据类型: {data_type_str},将使用默认值") - + # 创建节点对象,节点ID留空,将通过自动查找功能获取 nodes.append(OpcUaNode( name=name, node_type=node_type, data_type=data_type )) - + # 返回节点列表和名称映射字典 return nodes, name_mapping, reverse_mapping @@ -328,7 +328,7 @@ class BaseClient(UniversalDriver): logger.info(f"重新查找成功: '{chinese_name}', NodeId: {node.node_id}") return node raise ValueError(f'节点 {chinese_name} (英文名: {name}) 未注册或未找到') - + # 直接使用原始名称查找 if name not in self._node_registry: if name in self._variables_to_find: @@ -368,14 +368,14 @@ class BaseClient(UniversalDriver): for node in node_list: if node is None: continue - + if node.name in self._node_registry: logger.debug(f'节点 "{node.name}" 已存在于注册表') exist = self._node_registry[node.name] if exist.type != node.node_type: raise ValueError(f'节点 {node.name} 类型 {node.node_type} 与已存在的类型 {exist.type} 不一致') continue - + # 将节点添加到待查找列表 self._variables_to_find[node.name] = { "node_type": node.node_type, @@ -385,11 +385,11 @@ class BaseClient(UniversalDriver): logger.debug(f'添加节点 "{node.name}" ({node.node_type}) 到待查找列表') logger.info(f'节点注册完成:新增 {new_nodes_count} 个待查找节点,总计 {len(self._variables_to_find)} 个') - + # 如果客户端已连接,立即开始查找 if self.client: self._find_nodes() - + return self def run_opcua_workflow(self, workflow: OpcUaWorkflow) -> None: @@ -480,7 +480,7 @@ class BaseClient(UniversalDriver): def create_node_function(self, func_name: str = None, node_name: str = None, mode: str = None, value: Any = None, **kwargs) -> Callable[[Callable[[str], OpcUaNodeBase]], bool]: def execute_node_function(use_node: Callable[[str], OpcUaNodeBase]) -> Union[bool, Tuple[Any, bool]]: target_node = use_node(node_name) - + # 检查是否有对应的参数值可用 current_value = value if hasattr(self, '_workflow_params') and func_name in self._workflow_params: @@ -488,19 +488,21 @@ class BaseClient(UniversalDriver): print(f"使用参数值 {func_name} = {current_value}") else: print(f"执行 {node_name}, {type(target_node).__name__}, {target_node.node_id}, {mode}, {current_value}") - + if mode == 'read': result_str = self.read_node(node_name) - + try: # 将字符串转换为字典 result_str = result_str.replace("'", '"') # 替换单引号为双引号以便JSON解析 result_dict = json.loads(result_str) - + # 从字典获取值和错误标志 val = result_dict.get("value") err = result_dict.get("error") - + + print(f"读取 {node_name} 返回值 = {val} (类型: {type(val).__name__}), 错误 = {err}") + print(f"读取 {node_name} 返回值 = {val} (类型: {type(val).__name__}, 错误 = {err}") return val, err except Exception as e: @@ -510,7 +512,7 @@ class BaseClient(UniversalDriver): # 构造完整的JSON输入,包含node_name和value input_json = json.dumps({"node_name": node_name, "value": current_value}) result_str = self.write_node(input_json) - + try: # 解析返回的字符串为字典 result_str = result_str.replace("'", '"') # 替换单引号为双引号以便JSON解析 @@ -527,19 +529,19 @@ class BaseClient(UniversalDriver): print(f"调用方法 {node_name} 参数 = {args}, 返回值 = {result}") return result return False - + if func_name is None: func_name = f"{node_name}_{mode}_{str(value)}" - + print(f"创建 node function: {mode}, {func_name}") self.function_name[func_name] = execute_node_function - + return execute_node_function - + def create_init_function(self, func_name: str = None, write_nodes: Union[Dict[str, Any], List[str]] = 
None): """ 创建初始化函数 - + 参数: func_name: 函数名称 write_nodes: 写节点配置,可以是节点名列表[节点1,节点2]或节点值映射{节点1:值1,节点2:值2} @@ -599,25 +601,25 @@ class BaseClient(UniversalDriver): except Exception as e: print(f"初始化函数: 解析写入结果失败: {e}, 原始结果: {result_str}") return True - + if func_name is None: func_name = f"init_function_{str(time.time())}" - + print(f"创建初始化函数: {func_name}") self.function_name[func_name] = execute_init_function return execute_init_function - + def create_stop_function(self, func_name: str = None, write_nodes: Union[Dict[str, Any], List[str]] = None): """ 创建停止函数 - + 参数: func_name: 函数名称 write_nodes: 写节点配置,可以是节点名列表[节点1,节点2]或节点值映射{节点1:值1,节点2:值2} """ if write_nodes is None: raise ValueError("必须提供write_nodes参数") - + def execute_stop_function(use_node: Callable[[str], OpcUaNodeBase]) -> bool: if isinstance(write_nodes, list): # 处理节点列表,默认值都是False @@ -647,25 +649,25 @@ class BaseClient(UniversalDriver): except Exception as e: print(f"停止函数: 解析写入结果失败: {e}, 原始结果: {result_str}") return True - + if func_name is None: func_name = f"stop_function_{str(time.time())}" - + print(f"创建停止函数: {func_name}") self.function_name[func_name] = execute_stop_function return execute_stop_function - + def create_cleanup_function(self, func_name: str = None, write_nodes: Union[Dict[str, Any], List[str]] = None): """ 创建清理函数 - + 参数: func_name: 函数名称 write_nodes: 写节点配置,可以是节点名列表[节点1,节点2]或节点值映射{节点1:值1,节点2:值2} """ if write_nodes is None: raise ValueError("必须提供write_nodes参数") - + def execute_cleanup_function(use_node: Callable[[str], OpcUaNodeBase]) -> bool: if isinstance(write_nodes, list): # 处理节点列表,默认值都是False @@ -695,10 +697,10 @@ class BaseClient(UniversalDriver): except Exception as e: print(f"清理函数: 解析写入结果失败: {e}, 原始结果: {result_str}") return True - + if func_name is None: func_name = f"cleanup_function_{str(time.time())}" - + print(f"创建清理函数: {func_name}") self.function_name[func_name] = execute_cleanup_function return execute_cleanup_function @@ -706,7 +708,7 @@ class BaseClient(UniversalDriver): def create_start_function(self, func_name: str, stop_condition_expression: str = "True", write_nodes: Union[Dict[str, Any], List[str]] = None, condition_nodes: Union[Dict[str, str], List[str]] = None): """ 创建开始函数 - + 参数: func_name: 函数名称 stop_condition_expression: 停止条件表达式,可直接引用节点名称 @@ -756,16 +758,16 @@ class BaseClient(UniversalDriver): print(f"直接写入 {node_name} = {current_value}, 结果: {success}") except Exception as e: print(f"解析直接写入结果失败: {e}, 原始结果: {result_str}") - + # 如果没有条件节点,立即返回 if not condition_nodes: return True - + # 处理条件检查和等待 while True: next_loop = False condition_source = {} - + # 直接读取条件节点 if isinstance(condition_nodes, list): # 处理节点列表 @@ -779,11 +781,11 @@ class BaseClient(UniversalDriver): read_res = result_dict.get("value") read_err = result_dict.get("error", False) print(f"直接读取 {node_name} 返回值 = {read_res}, 错误 = {read_err}") - + if read_err: next_loop = True break - + # 将节点值存入条件源字典,使用节点名称作为键 condition_source[node_name] = read_res # 为了向后兼容,也保留read_i格式 @@ -804,11 +806,11 @@ class BaseClient(UniversalDriver): read_res = result_dict.get("value") read_err = result_dict.get("error", False) print(f"直接读取 {node_name} 返回值 = {read_res}, 错误 = {read_err}") - + if read_err: next_loop = True break - + # 将节点值存入条件源字典 condition_source[node_name] = read_res # 也保存使用函数名作为键 @@ -817,13 +819,13 @@ class BaseClient(UniversalDriver): print(f"解析直接读取结果失败: {e}, 原始结果: {result_str}") next_loop = True break - + if not next_loop: if stop_condition_expression: # 添加调试信息 print(f"条件源数据: {condition_source}") condition_source["__RESULT"] = None - + # 确保安全地执行条件表达式 
try: # 先尝试使用eval更安全的方式计算表达式 @@ -837,10 +839,10 @@ class BaseClient(UniversalDriver): except Exception as e2: print(f"使用exec执行表达式也失败: {e2}") condition_source["__RESULT"] = False - + res = condition_source["__RESULT"] print(f"取得计算结果: {res}, 条件表达式: {stop_condition_expression}") - + if res: print("满足停止条件,结束工作流") break @@ -849,21 +851,21 @@ class BaseClient(UniversalDriver): break else: time.sleep(0.3) - + return True - + self.function_name[func_name] = execute_start_function return execute_start_function create_action_from_json = None - + def create_action_from_json(self, data: Union[Dict, Any]) -> WorkflowAction: """ 从JSON配置创建工作流动作 - + 参数: data: 动作JSON数据 - + 返回: WorkflowAction对象 """ @@ -874,7 +876,7 @@ class BaseClient(UniversalDriver): stop_function = None init_function = None cleanup_function = None - + # 提取start_function相关信息 if hasattr(data, "start_function") and data.start_function: start_function = data.start_function @@ -888,31 +890,31 @@ class BaseClient(UniversalDriver): write_nodes = start_function["write_nodes"] if "condition_nodes" in start_function: condition_nodes = start_function["condition_nodes"] - + # 提取stop_function信息 if hasattr(data, "stop_function") and data.stop_function: stop_function = data.stop_function elif isinstance(data, dict) and data.get("stop_function"): stop_function = data.get("stop_function") - + # 提取init_function信息 if hasattr(data, "init_function") and data.init_function: init_function = data.init_function elif isinstance(data, dict) and data.get("init_function"): init_function = data.get("init_function") - + # 提取cleanup_function信息 if hasattr(data, "cleanup_function") and data.cleanup_function: cleanup_function = data.cleanup_function elif isinstance(data, dict) and data.get("cleanup_function"): cleanup_function = data.get("cleanup_function") - + # 创建工作流动作组件 init = None start = None stop = None cleanup = None - + # 处理init function if init_function: init_params = {"func_name": init_function.get("func_name")} @@ -921,9 +923,9 @@ class BaseClient(UniversalDriver): else: # 如果没有write_nodes,创建一个空字典 init_params["write_nodes"] = {} - + init = self.create_init_function(**init_params) - + # 处理start function if start_function: start_params = { @@ -933,7 +935,7 @@ class BaseClient(UniversalDriver): "condition_nodes": condition_nodes } start = self.create_start_function(**start_params) - + # 处理stop function if stop_function: stop_params = { @@ -941,7 +943,7 @@ class BaseClient(UniversalDriver): "write_nodes": stop_function.get("write_nodes", {}) } stop = self.create_stop_function(**stop_params) - + # 处理cleanup function if cleanup_function: cleanup_params = { @@ -949,22 +951,22 @@ class BaseClient(UniversalDriver): "write_nodes": cleanup_function.get("write_nodes", {}) } cleanup = self.create_cleanup_function(**cleanup_params) - + return WorkflowAction(init=init, start=start, stop=stop, cleanup=cleanup) - + workflow_name: Dict[str, OpcUaWorkflowModel] = {} def create_workflow_from_json(self, data: List[Dict]) -> None: """ 从JSON配置创建工作流程序 - + 参数: data: 工作流配置列表 """ for ind, flow_dict in enumerate(data): print(f"正在创建 workflow {ind}, {flow_dict['name']}") actions = [] - + for i in flow_dict["action"]: if isinstance(i, str): print(f"沿用已有 workflow 作为 action: {i}") @@ -973,14 +975,14 @@ class BaseClient(UniversalDriver): print("创建 action") # 直接将字典转换为SimplifiedActionJson对象或直接使用字典 action = self.create_action_from_json(i) - + actions.append(action) - + # 获取参数 parameters = flow_dict.get("parameters", []) - + flow_instance = OpcUaWorkflowModel( - name=flow_dict["name"], + 
name=flow_dict["name"], actions=actions, parameters=parameters, description=flow_dict.get("description", "") @@ -1005,19 +1007,19 @@ class BaseClient(UniversalDriver): register_params = data.register_node_list_from_csv_path create_flow = data.create_flow execute_flow = data.execute_flow if hasattr(data, "execute_flow") else [] - + # 注册节点 if register_params: print(f"注册节点 csv: {register_params}") self.register_node_list_from_csv_path(**register_params) - + # 创建工作流 print("创建工作流") self.create_workflow_from_json(create_flow) - + # 注册工作流为实例方法 self.register_workflows_as_methods() - + # 如果存在execute_flow字段,则执行指定的工作流(向后兼容) if execute_flow: print("执行工作流") @@ -1029,12 +1031,12 @@ class BaseClient(UniversalDriver): # 获取工作流的参数信息(如果存在) workflow_params = getattr(workflow, 'parameters', []) or [] workflow_desc = getattr(workflow, 'description', None) or f"执行工作流: {workflow_name}" - + # 创建执行工作流的方法 def create_workflow_method(wf_name=workflow_name, wf=workflow, params=workflow_params): def workflow_method(*args, **kwargs): logger.info(f"执行工作流: {wf_name}, 参数: {args}, {kwargs}") - + # 处理传入的参数 if params and (args or kwargs): # 将位置参数转换为关键字参数 @@ -1042,31 +1044,31 @@ class BaseClient(UniversalDriver): for i, param_name in enumerate(params): if i < len(args): params_dict[param_name] = args[i] - + # 合并关键字参数 params_dict.update(kwargs) - + # 保存参数,供节点函数使用 self._workflow_params = params_dict else: self._workflow_params = {} - + # 执行工作流 result = self.run_opcua_workflow_model(wf) - + # 清理参数 self._workflow_params = {} - + return result - + # 设置方法的文档字符串 workflow_method.__doc__ = workflow_desc if params: param_doc = ", ".join(params) workflow_method.__doc__ += f"\n参数: {param_doc}" - + return workflow_method - + # 注册为实例方法 method = create_workflow_method() setattr(self, workflow_name, method) @@ -1082,7 +1084,7 @@ class BaseClient(UniversalDriver): try: node = self.use_node(node_name) value, error = node.read() - + # 创建结果字典 result = { "value": value, @@ -1090,7 +1092,7 @@ class BaseClient(UniversalDriver): "node_name": node_name, "timestamp": time.time() } - + # 返回JSON字符串 return json.dumps(result) except Exception as e: @@ -1104,7 +1106,7 @@ class BaseClient(UniversalDriver): "timestamp": time.time() } return json.dumps(result) - + def write_node(self, json_input: str) -> str: """ 写入节点值的便捷方法 @@ -1118,24 +1120,24 @@ class BaseClient(UniversalDriver): # 解析JSON格式的输入 if not isinstance(json_input, str): json_input = str(json_input) - + try: input_data = json.loads(json_input) if not isinstance(input_data, dict): return json.dumps({"error": True, "error_message": "输入必须是包含node_name和value的JSON对象", "success": False}) - + # 从JSON中提取节点名称和值 node_name = input_data.get("node_name") value = input_data.get("value") - + if node_name is None: return json.dumps({"error": True, "error_message": "JSON中缺少node_name字段", "success": False}) except json.JSONDecodeError as e: return json.dumps({"error": True, "error_message": f"JSON解析错误: {str(e)}", "success": False}) - + node = self.use_node(node_name) error = node.write(value) - + # 创建结果字典 result = { "value": value, @@ -1144,7 +1146,7 @@ class BaseClient(UniversalDriver): "timestamp": time.time(), "success": not error } - + return json.dumps(result) except Exception as e: logger.error(f"写入节点失败: {e}") @@ -1155,7 +1157,7 @@ class BaseClient(UniversalDriver): "success": False } return json.dumps(result) - + def call_method(self, node_name: str, *args) -> Tuple[Any, bool]: """ 调用方法节点的便捷方法 @@ -1175,11 +1177,11 @@ class BaseClient(UniversalDriver): class OpcUaClient(BaseClient): def __init__( - self, - url: 
str, + self, + url: str, deck: Optional[Union[post_process_deck, Dict[str, Any]]] = None, - config_path: str = None, - username: str = None, + config_path: str = None, + username: str = None, password: str = None, use_subscription: bool = True, cache_timeout: float = 5.0, @@ -1190,9 +1192,10 @@ class OpcUaClient(BaseClient): # 降低OPCUA库的日志级别 import logging logging.getLogger("opcua").setLevel(logging.WARNING) - + super().__init__() + # ===== 关键修改:参照 BioyondWorkstation 处理 deck ===== super().__init__() @@ -1215,15 +1218,15 @@ class OpcUaClient(BaseClient): if hasattr(self.deck, 'children'): warehouse_count = len(self.deck.children) logger.info(f"Deck 初始化完成,加载 {warehouse_count} 个资源") - - + + # OPC UA 客户端初始化 client = Client(url) - + if username and password: client.set_user(username) client.set_password(password) - + self._set_client(client) # 订阅相关属性 @@ -1231,30 +1234,30 @@ class OpcUaClient(BaseClient): self._subscription = None self._subscription_handles = {} self._subscription_interval = subscription_interval - + # 缓存相关属性 self._node_values = {} # 修改为支持时间戳的缓存结构 self._cache_timeout = cache_timeout - + # 连接状态监控 self._connection_check_interval = 30.0 # 连接检查间隔(秒) self._connection_monitor_running = False self._connection_monitor_thread = None - + # 添加线程锁,保护OPC UA客户端的并发访问 import threading self._client_lock = threading.RLock() - + # 连接到服务器 self._connect() - + # 如果提供了配置文件路径,则加载配置并注册工作流 if config_path: self.load_config(config_path) - + # 启动连接监控 self._start_connection_monitor() - + def _connect(self) -> None: """连接到OPC UA服务器""" @@ -1263,23 +1266,23 @@ class OpcUaClient(BaseClient): try: self.client.connect() logger.info('✓ 客户端已连接!') - + # 连接后开始查找节点 if self._variables_to_find: self._find_nodes() - + # 如果启用订阅模式,设置订阅 if self._use_subscription: self._setup_subscriptions() else: logger.info("订阅模式已禁用,将使用按需读取模式") - + except Exception as e: logger.error(f'客户端连接失败: {e}') raise else: raise ValueError('客户端未初始化') - + class SubscriptionHandler: """freeopcua订阅处理器:必须实现 datachange_notification 方法""" def __init__(self, outer): @@ -1300,22 +1303,22 @@ class OpcUaClient(BaseClient): """设置 OPC UA 订阅""" if not self.client or not self._use_subscription: return - + with self._client_lock: try: logger.info(f"开始设置订阅 (发布间隔: {self._subscription_interval}ms)...") - + # 创建订阅 handler = OpcUaClient.SubscriptionHandler(self) self._subscription = self.client.create_subscription( self._subscription_interval, handler ) - + # 为所有变量节点创建监控项 subscribed_count = 0 skipped_count = 0 - + for node_name, node in self._node_registry.items(): # 只为变量节点创建订阅 if node.type == NodeType.VARIABLE and node.node_id: @@ -1333,16 +1336,16 @@ class OpcUaClient(BaseClient): logger.warning(f"✗ 订阅节点 {node_name} 失败: {e}") else: skipped_count += 1 - + logger.info(f"订阅设置完成: 成功 {subscribed_count} 个, 跳过 {skipped_count} 个") - + except Exception as e: logger.error(f"设置订阅失败: {e}") traceback.print_exc() # 订阅失败时回退到按需读取模式 self._use_subscription = False logger.warning("订阅模式设置失败,已自动切换到按需读取模式") - + def _on_subscription_datachange(self, node, val, data): """订阅数据变化处理器(供内部 SubscriptionHandler 调用)""" try: @@ -1360,11 +1363,11 @@ class OpcUaClient(BaseClient): break except Exception as e: logger.error(f"处理订阅数据失败: {e}") - + def get_node_value(self, name, use_cache=True, force_read=False): """ 获取节点值(智能缓存版本) - + 参数: name: 节点名称(支持中文名或英文名) use_cache: 是否使用缓存 @@ -1373,11 +1376,28 @@ class OpcUaClient(BaseClient): # 处理名称映射 if name in self._name_mapping: chinese_name = self._name_mapping[name] + # 优先从缓存获取值 + if chinese_name in self._node_values: + return self._node_values[chinese_name] + # 
缓存中没有则直接读取 + value, _ = self.use_node(chinese_name).read() + return value + # 如果提供的是中文名,直接使用 + elif name in self._node_registry: + # 优先从缓存获取值 + if name in self._node_values: + return self._node_values[name] + # 缓存中没有则直接读取 + value, _ = self.use_node(name).read() + return value + else: + raise ValueError(f"未找到名称为 '{name}' 的节点") + elif name in self._node_registry: chinese_name = name else: raise ValueError(f"未找到名称为 '{name}' 的节点") - + # 如果强制读取,直接从服务器读取 if force_read: with self._client_lock: @@ -1389,18 +1409,18 @@ class OpcUaClient(BaseClient): 'source': 'forced_read' } return value - + # 检查缓存 if use_cache and chinese_name in self._node_values: cache_entry = self._node_values[chinese_name] cache_age = time.time() - cache_entry['timestamp'] - + # 如果是订阅模式,缓存永久有效(由订阅更新) # 如果是按需读取模式,检查缓存超时 if cache_entry.get('source') == 'subscription' or cache_age < self._cache_timeout: logger.debug(f"从缓存读取: {chinese_name} = {cache_entry['value']} (age: {cache_age:.2f}s, source: {cache_entry.get('source', 'unknown')})") return cache_entry['value'] - + # 缓存过期或不存在,从服务器读取 with self._client_lock: try: @@ -1419,7 +1439,7 @@ class OpcUaClient(BaseClient): except Exception as e: logger.error(f"读取节点 {chinese_name} 出错: {e}") return None - + def set_node_value(self, name, value): """ 设置节点值 @@ -1432,12 +1452,28 @@ class OpcUaClient(BaseClient): chinese_name = name else: raise ValueError(f"未找到名称为 '{name}' 的节点") - + + # 写入值 + error = node.write(value) + if not error: + # 更新缓存 + if hasattr(node, 'name'): + self._node_values[node.name] = value + return True + return False + + def _refresh_worker(self): + """节点值刷新线程的工作函数""" + self._refresh_running = True + logger.info(f"节点值刷新线程已启动,刷新间隔: {self._refresh_interval}秒") + + while self._refresh_running: + with self._client_lock: try: node = self.use_node(chinese_name) error = node.write(value) - + if not error: # 写入成功,立即更新缓存 self._node_values[chinese_name] = { @@ -1453,7 +1489,7 @@ class OpcUaClient(BaseClient): except Exception as e: logger.error(f"写入节点 {chinese_name} 出错: {e}") return False - + def _check_connection(self) -> bool: """检查连接状态""" try: @@ -1466,22 +1502,33 @@ class OpcUaClient(BaseClient): logger.warning(f"连接检查失败: {e}") return False return False - + def _connection_monitor_worker(self): """连接监控线程工作函数""" self._connection_monitor_running = True logger.info(f"连接监控线程已启动 (检查间隔: {self._connection_check_interval}秒)") - + reconnect_attempts = 0 max_reconnect_attempts = 5 - + while self._connection_monitor_running: try: + self.refresh_node_values() + except Exception as e: + logger.error(f"节点值刷新过程出错: {e}") + + # 等待下一次刷新 + time.sleep(self._refresh_interval) + + def start_node_refresh(self): + """启动节点值刷新线程""" + if self._refresh_thread is not None and self._refresh_thread.is_alive(): + logger.warning("节点值刷新线程已在运行") # 检查连接状态 if not self._check_connection(): logger.warning("检测到连接断开,尝试重新连接...") reconnect_attempts += 1 - + if reconnect_attempts <= max_reconnect_attempts: try: # 尝试重新连接 @@ -1491,14 +1538,14 @@ class OpcUaClient(BaseClient): self.client.disconnect() except: pass - + self.client.connect() logger.info("✓ 重新连接成功") - + # 重新设置订阅 if self._use_subscription: self._setup_subscriptions() - + reconnect_attempts = 0 except Exception as e: logger.error(f"重新连接失败 (尝试 {reconnect_attempts}/{max_reconnect_attempts}): {e}") @@ -1509,34 +1556,44 @@ class OpcUaClient(BaseClient): else: # 连接正常,重置重连计数 reconnect_attempts = 0 - + except Exception as e: logger.error(f"连接监控出错: {e}") - + # 等待下次检查 time.sleep(self._connection_check_interval) - + def _start_connection_monitor(self): """启动连接监控线程""" if 
self._connection_monitor_thread is not None and self._connection_monitor_thread.is_alive(): logger.warning("连接监控线程已在运行") return - + import threading + self._refresh_thread = threading.Thread(target=self._refresh_worker, daemon=True) + self._refresh_thread.start() + + def stop_node_refresh(self): + """停止节点值刷新线程""" + self._refresh_running = False + if self._refresh_thread and self._refresh_thread.is_alive(): + self._refresh_thread.join(timeout=2.0) + logger.info("节点值刷新线程已停止") + self._connection_monitor_thread = threading.Thread( - target=self._connection_monitor_worker, + target=self._connection_monitor_worker, daemon=True, name="OpcUaConnectionMonitor" ) self._connection_monitor_thread.start() - + def _stop_connection_monitor(self): """停止连接监控线程""" self._connection_monitor_running = False if self._connection_monitor_thread and self._connection_monitor_thread.is_alive(): self._connection_monitor_thread.join(timeout=2.0) logger.info("连接监控线程已停止") - + def read_node(self, node_name: str) -> str: """ 读取节点值的便捷方法(使用缓存) @@ -1545,11 +1602,11 @@ class OpcUaClient(BaseClient): try: # 使用get_node_value方法,自动处理缓存 value = self.get_node_value(node_name, use_cache=True) - + # 获取缓存信息 chinese_name = self._name_mapping.get(node_name, node_name) cache_info = self._node_values.get(chinese_name, {}) - + result = { "value": value, "error": False, @@ -1558,7 +1615,7 @@ class OpcUaClient(BaseClient): "cache_age": time.time() - cache_info.get('timestamp', time.time()), "source": cache_info.get('source', 'unknown') } - + return json.dumps(result) except Exception as e: logger.error(f"读取节点 {node_name} 失败: {e}") @@ -1582,21 +1639,21 @@ class OpcUaClient(BaseClient): 'cache_timeout': self._cache_timeout, 'using_subscription': self._use_subscription } - + for node_name, cache_entry in self._node_values.items(): source = cache_entry.get('source', 'unknown') cache_age = current_time - cache_entry['timestamp'] - + if source == 'subscription': stats['subscription_nodes'] += 1 elif source in ['on_demand_read', 'forced_read', 'write']: stats['on_demand_nodes'] += 1 - + if cache_age > self._cache_timeout: stats['expired_nodes'] += 1 - + return stats - + def print_cache_stats(self): """打印缓存统计信息""" stats = self.get_cache_stats() @@ -1610,37 +1667,41 @@ class OpcUaClient(BaseClient): print(f" - 已过期节点: {stats['expired_nodes']}") print(f"缓存超时时间: {stats['cache_timeout']}秒") print("="*80 + "\n") - + def load_config(self, config_path: str) -> None: """从JSON配置文件加载并注册工作流""" try: with open(config_path, 'r', encoding='utf-8') as f: config_data = json.load(f) - + # 处理节点注册 if "register_node_list_from_csv_path" in config_data: config_dir = os.path.dirname(os.path.abspath(config_path)) - + + # 处理CSV路径,如果是相对路径,则相对于配置文件所在目录 + if "path" in config_data["register_node_list_from_csv_path"]: csv_path = config_data["register_node_list_from_csv_path"]["path"] if not os.path.isabs(csv_path): csv_path = os.path.join(config_dir, csv_path) config_data["register_node_list_from_csv_path"]["path"] = csv_path - + + # 直接使用字典 + self.register_node_list_from_csv_path(**config_data["register_node_list_from_csv_path"]) - + if self.client and self._variables_to_find: logger.info("CSV加载完成,开始查找服务器节点...") self._find_nodes() - + # 处理工作流创建 if "create_flow" in config_data: self.create_workflow_from_json(config_data["create_flow"]) self.register_workflows_as_methods() - - # 将所有节点注册为属性 + + # 将所有节点注册为属性(只注册已找到的节点) self._register_nodes_as_attributes() - + # 打印统计信息 found_count = len(self._node_registry) total_count = len(self._variables_to_find) @@ -1648,23 +1709,114 @@ class 
OpcUaClient(BaseClient): logger.warning(f"节点查找完成:找到 {found_count}/{total_count} 个节点") else: logger.info(f"✓ 节点查找完成:所有 {found_count} 个节点均已找到") - + # 如果使用订阅模式,重新设置订阅(确保新节点被订阅) if self._use_subscription and found_count > 0: self._setup_subscriptions() - + logger.info(f"成功从 {config_path} 加载配置") except Exception as e: logger.error(f"加载配置文件 {config_path} 失败: {e}") traceback.print_exc() - + + def print_node_registry_status(self): + """打印节点注册状态,用于调试""" + print("\n" + "="*80) + print("节点注册状态诊断报告") + print("="*80) + print(f"\n待查找节点总数: {len(self._variables_to_find)}") + print(f"已找到节点总数: {len(self._node_registry)}") + print(f"未找到节点总数: {len(self._variables_to_find) - len(self._node_registry)}") + + # 显示已找到的节点(前10个) + if self._node_registry: + print(f"\n✓ 已找到的节点 (显示前10个):") + for i, (name, node) in enumerate(list(self._node_registry.items())[:10]): + eng_name = self._reverse_mapping.get(name, "") + eng_info = f" ({eng_name})" if eng_name else "" + print(f" {i+1}. '{name}'{eng_info}") + print(f" NodeId: {node.node_id}") + print(f" Type: {node.type}") + + # 显示未找到的节点 + not_found = [name for name in self._variables_to_find if name not in self._node_registry] + if not_found: + print(f"\n✗ 未找到的节点 (显示前20个):") + for i, name in enumerate(not_found[:20]): + eng_name = self._reverse_mapping.get(name, "") + eng_info = f" ({eng_name})" if eng_name else "" + node_info = self._variables_to_find[name] + print(f" {i+1}. '{name}'{eng_info} - {node_info['node_type']}") + + print("\n" + "="*80) + print("提示:") + print("1. 如果大量节点未找到,请检查CSV中的节点名称是否与服务器BrowseName完全匹配") + print("2. 可以使用 client.browse_server_nodes() 查看服务器的实际节点结构") + print("3. 节点名称区分大小写,且包括所有空格和特殊字符") + print("="*80 + "\n") + + def browse_server_nodes(self, max_depth=3, start_path=["0:Objects"]): + """浏览服务器节点树,用于调试和对比""" + if not self.client: + print("客户端未连接") + return + + print("\n" + "="*80) + print(f"服务器节点浏览 (最大深度: {max_depth})") + print("="*80 + "\n") + + try: + root = self.client.get_root_node() + start_node = root.get_child(start_path) + self._browse_node_recursive(start_node, depth=0, max_depth=max_depth) + except Exception as e: + print(f"浏览失败: {e}") + traceback.print_exc() + + def _browse_node_recursive(self, node, depth=0, max_depth=3): + """递归浏览节点""" + if depth > max_depth: + return + + try: + browse_name = node.get_browse_name() + node_class = node.get_node_class() + indent = " " * depth + + # 显示节点信息 + print(f"{indent}├─ {browse_name.Name}") + print(f"{indent}│ NodeId: {str(node.nodeid)}") + print(f"{indent}│ NodeClass: {node_class}") + + # 如果是变量,显示数据类型 + if node_class == NodeClass.Variable: + try: + data_type = node.get_data_type() + print(f"{indent}│ DataType: {data_type}") + except: + pass + + # 递归处理子节点(限制数量避免输出过多) + if depth < max_depth: + children = node.get_children() + for i, child in enumerate(children[:20]): # 每层最多显示20个子节点 + self._browse_node_recursive(child, depth + 1, max_depth) + if len(children) > 20: + print(f"{indent} ... 
({len(children) - 20} more children)") + except Exception as e: + # 忽略单个节点的错误 + pass + def disconnect(self): + # 停止刷新线程 + self.stop_node_refresh() + """断开连接并清理资源""" logger.info("正在断开连接...") - + # 停止连接监控 self._stop_connection_monitor() - + # 删除订阅 if self._subscription: try: @@ -1673,7 +1825,7 @@ class OpcUaClient(BaseClient): logger.info("订阅已删除") except Exception as e: logger.warning(f"删除订阅失败: {e}") - + # 断开客户端连接 if self.client: try: @@ -1682,22 +1834,22 @@ class OpcUaClient(BaseClient): logger.info("✓ OPC UA 客户端已断开连接") except Exception as e: logger.error(f"断开连接失败: {e}") - + def _register_nodes_as_attributes(self): """将所有节点注册为实例属性""" for node_name, node in self._node_registry.items(): if not node.node_id or node.node_id == "": logger.warning(f"⚠ 节点 '{node_name}' 的 node_id 为空,跳过注册为属性") continue - + eng_name = self._reverse_mapping.get(node_name) attr_name = eng_name if eng_name else node_name.replace(' ', '_').replace('-', '_') - + def create_property_getter(node_key): def getter(self): return self.get_node_value(node_key, use_cache=True) return getter - + setattr(OpcUaClient, attr_name, property(create_property_getter(node_name))) logger.debug(f"已注册节点 '{node_name}' 为属性 '{attr_name}'") @@ -1705,14 +1857,14 @@ class OpcUaClient(BaseClient): """ROS2 节点就绪后的初始化""" if not (hasattr(self, 'deck') and self.deck): return - + if not (hasattr(ros_node, 'resource_tracker') and ros_node.resource_tracker): logger.warning("resource_tracker 不存在,无法注册 deck") return - + # 1. 本地注册(必需) ros_node.resource_tracker.add_resource(self.deck) - + # 2. 上传云端 try: from unilabos.ros.nodes.base_device_node import ROS2DeviceNode @@ -1728,30 +1880,30 @@ class OpcUaClient(BaseClient): if __name__ == '__main__': # 示例用法 - + # 使用配置文件创建客户端并自动注册工作流 import os current_dir = os.path.dirname(os.path.abspath(__file__)) config_path = os.path.join(current_dir, "opcua_huairou.json") - + # 创建OPC UA客户端并加载配置 try: client = OpcUaClient( url="opc.tcp://192.168.1.88:4840/freeopcua/server/", # 替换为实际的OPC UA服务器地址 config_path="D:\\Uni-Lab-OS\\unilabos\\device_comms\\opcua_client\\opcua_huairou.json" # 传入配置文件路径 ) - + # 列出所有已注册的工作流 print("\n已注册的工作流:") for workflow_name in client.workflow_name: print(f" - {workflow_name}") - + # 测试trigger_grab_action工作流 - 使用英文参数名 print("\n测试trigger_grab_action工作流 - 使用英文参数名:") client.trigger_grab_action(reaction_tank_number=2, raw_tank_number=2) # client.set_node_value("reaction_tank_number", 2) - + # 读取节点值 - 使用英文节点名 grab_complete = client.get_node_value("grab_complete") reaction_tank = client.get_node_value("reaction_tank_number") @@ -1761,19 +1913,19 @@ if __name__ == '__main__': print(f" - 抓取完成状态: {grab_complete}") print(f" - 当前反应罐号码: {reaction_tank}") print(f" - 当前原料罐号码: {raw_tank}") - + # 测试节点值写入 - 使用英文节点名 print("\n测试节点值写入 (使用英文节点名):") success = client.set_node_value("atomization_fast_speed", 150.5) print(f" - 写入搅拌浆雾化快速 = 150.5, 结果: {success}") - + # 读取写入的值 atomization_speed = client.get_node_value("atomization_fast_speed") print(f" - 读取搅拌浆雾化快速: {atomization_speed}") - + # 断开连接 client.disconnect() - + except Exception as e: print(f"错误: {e}") traceback.print_exc() diff --git a/unilabos/device_comms/opcua_client/node/uniopcua.py b/unilabos/device_comms/opcua_client/node/uniopcua.py index d99a5fd..a06d780 100644 --- a/unilabos/device_comms/opcua_client/node/uniopcua.py +++ b/unilabos/device_comms/opcua_client/node/uniopcua.py @@ -43,7 +43,7 @@ class Base(ABC): self._type = typ self._data_type = data_type self._node: Optional[Node] = None - + def _get_node(self) -> Node: if self._node is None: try: @@ -66,7 +66,7 @@ 
class Base(ABC): # 直接以字符串形式处理 if isinstance(nid, str): nid = nid.strip() - + # 处理包含类名的格式,如 'StringNodeId(ns=4;s=...)' 或 'NumericNodeId(ns=2;i=...)' # 提取括号内的内容 match_wrapped = re.match(r'(String|Numeric|Byte|Guid|TwoByteNode|FourByteNode)NodeId\((.*)\)', nid) @@ -116,16 +116,16 @@ class Base(ABC): def read(self) -> Tuple[Any, bool]: """读取节点值,返回(值, 是否出错)""" pass - + @abstractmethod def write(self, value: Any) -> bool: """写入节点值,返回是否出错""" pass - + @property def type(self) -> NodeType: return self._type - + @property def node_id(self) -> str: return self._node_id @@ -210,15 +210,15 @@ class Method(Base): super().__init__(client, name, node_id, NodeType.METHOD, data_type) self._parent_node_id = parent_node_id self._parent_node = None - + def _get_parent_node(self) -> Node: if self._parent_node is None: try: # 处理父节点ID,使用与_get_node相同的解析逻辑 import re - + nid = self._parent_node_id - + # 如果已经是 NodeId 对象,直接使用 try: from opcua.ua import NodeId as UaNodeId @@ -227,16 +227,16 @@ class Method(Base): return self._parent_node except Exception: pass - + # 字符串处理 if isinstance(nid, str): nid = nid.strip() - + # 处理包含类名的格式 match_wrapped = re.match(r'(String|Numeric|Byte|Guid|TwoByteNode|FourByteNode)NodeId\((.*)\)', nid) if match_wrapped: nid = match_wrapped.group(2).strip() - + # 常见短格式 if re.match(r'^ns=\d+;[is]=', nid): self._parent_node = self._client.get_node(nid) @@ -271,7 +271,7 @@ class Method(Base): def write(self, value: Any) -> bool: """方法节点不支持写入操作""" return True - + def call(self, *args) -> Tuple[Any, bool]: """调用方法,返回(返回值, 是否出错)""" try: @@ -285,7 +285,7 @@ class Method(Base): class Object(Base): def __init__(self, client: Client, name: str, node_id: str): super().__init__(client, name, node_id, NodeType.OBJECT, None) - + def read(self) -> Tuple[Any, bool]: """对象节点不支持直接读取操作""" return None, True @@ -293,7 +293,7 @@ class Object(Base): def write(self, value: Any) -> bool: """对象节点不支持直接写入操作""" return True - + def get_children(self) -> Tuple[List[Node], bool]: """获取子节点列表,返回(子节点列表, 是否出错)""" try: @@ -301,4 +301,4 @@ class Object(Base): return children, False except Exception as e: print(f"获取对象 {self._name} 的子节点失败: {e}") - return [], True \ No newline at end of file + return [], True diff --git a/unilabos/devices/workstation/bioyond_studio/bioyond_rpc.py b/unilabos/devices/workstation/bioyond_studio/bioyond_rpc.py index 77cac12..c365be7 100644 --- a/unilabos/devices/workstation/bioyond_studio/bioyond_rpc.py +++ b/unilabos/devices/workstation/bioyond_studio/bioyond_rpc.py @@ -49,6 +49,14 @@ class BioyondV1RPC(BaseRequest): self.config = config self.api_key = config["api_key"] self.host = config["api_host"] + + # 初始化 location_mapping + # 直接从 warehouse_mapping 构建,确保数据源所谓的单一和结构化 + self.location_mapping = {} + warehouse_mapping = self.config.get("warehouse_mapping", {}) + for warehouse_name, warehouse_config in warehouse_mapping.items(): + if "site_uuids" in warehouse_config: + self.location_mapping.update(warehouse_config["site_uuids"]) self._logger = SimpleLogger() self.material_cache = {} self._load_material_cache() @@ -176,7 +184,40 @@ class BioyondV1RPC(BaseRequest): return {} print(f"add material data: {response['data']}") - return response.get("data", {}) + + # 自动更新缓存 + data = response.get("data", {}) + if data: + if isinstance(data, str): + # 如果返回的是字符串,通常是ID + mat_id = data + name = params.get("name") + else: + # 如果返回的是字典,尝试获取name和id + name = data.get("name") or params.get("name") + mat_id = data.get("id") + + if name and mat_id: + self.material_cache[name] = mat_id + print(f"已自动更新缓存: {name} -> {mat_id}") + 
+ # 处理返回数据中的 details (如果有) + # 有些 API 返回结构可能直接包含 details,或者在 data 字段中 + details = data.get("details", []) if isinstance(data, dict) else [] + if not details and isinstance(data, dict): + details = data.get("detail", []) + + if details: + for detail in details: + d_name = detail.get("name") + # 尝试从不同字段获取 ID + d_id = detail.get("id") or detail.get("detailMaterialId") + + if d_name and d_id: + self.material_cache[d_name] = d_id + print(f"已自动更新 detail 缓存: {d_name} -> {d_id}") + + return data def query_matial_type_id(self, data) -> list: """查找物料typeid""" @@ -203,7 +244,7 @@ class BioyondV1RPC(BaseRequest): params={ "apiKey": self.api_key, "requestTime": self.get_current_time_iso8601(), - "data": {}, + "data": 0, }) if not response or response['code'] != 1: return [] @@ -273,12 +314,19 @@ class BioyondV1RPC(BaseRequest): if not response or response['code'] != 1: return {} + + # 自动更新缓存 - 移除被删除的物料 + for name, mid in list(self.material_cache.items()): + if mid == material_id: + del self.material_cache[name] + print(f"已从缓存移除物料: {name}") + break + return response.get("data", {}) def material_outbound(self, material_id: str, location_name: str, quantity: int) -> dict: """指定库位出库物料(通过库位名称)""" - # location_name 参数实际上应该直接是 location_id (UUID) - location_id = location_name + location_id = self.location_mapping.get(location_name, location_name) params = { "materialId": material_id, @@ -1104,6 +1152,10 @@ class BioyondV1RPC(BaseRequest): for detail_material in detail_materials: detail_name = detail_material.get("name") detail_id = detail_material.get("detailMaterialId") + if not detail_id: + # 尝试其他可能的字段 + detail_id = detail_material.get("id") + if detail_name and detail_id: self.material_cache[detail_name] = detail_id print(f"加载detail材料: {detail_name} -> ID: {detail_id}") @@ -1124,6 +1176,14 @@ class BioyondV1RPC(BaseRequest): print(f"从缓存找到材料: {material_name_or_id} -> ID: {material_id}") return material_id + # 如果缓存中没有,尝试刷新缓存 + print(f"缓存中未找到材料 '{material_name_or_id}',尝试刷新缓存...") + self.refresh_material_cache() + if material_name_or_id in self.material_cache: + material_id = self.material_cache[material_name_or_id] + print(f"刷新缓存后找到材料: {material_name_or_id} -> ID: {material_id}") + return material_id + print(f"警告: 未在缓存中找到材料名称 '{material_name_or_id}',将使用原值") return material_name_or_id diff --git a/unilabos/devices/workstation/bioyond_studio/config.py.deprecated b/unilabos/devices/workstation/bioyond_studio/config.py.deprecated new file mode 100644 index 0000000..cccd087 --- /dev/null +++ b/unilabos/devices/workstation/bioyond_studio/config.py.deprecated @@ -0,0 +1,329 @@ +# config.py +""" +Bioyond工作站配置文件 +包含API配置、工作流映射、物料类型映射、仓库库位映射等所有配置信息 +""" + +from unilabos.resources.bioyond.decks import BIOYOND_PolymerReactionStation_Deck + +# ============================================================================ +# 基础配置 +# ============================================================================ + +# API配置 +API_CONFIG = { + "api_key": "DE9BDDA0", + "api_host": "http://192.168.1.200:44402" +} + +# HTTP 报送服务配置 +HTTP_SERVICE_CONFIG = { + "http_service_host": "127.0.0.1", # 监听地址 + "http_service_port": 8080, # 监听端口 +} + +# Deck配置 - 反应站工作台配置 +DECK_CONFIG = BIOYOND_PolymerReactionStation_Deck(setup=True) + +# ============================================================================ +# 工作流配置 +# ============================================================================ + +# 工作流ID映射 +WORKFLOW_MAPPINGS = { + "reactor_taken_out": "3a16081e-4788-ca37-eff4-ceed8d7019d1", + "reactor_taken_in": 
"3a160df6-76b3-0957-9eb0-cb496d5721c6", + "Solid_feeding_vials": "3a160877-87e7-7699-7bc6-ec72b05eb5e6", + "Liquid_feeding_vials(non-titration)": "3a167d99-6158-c6f0-15b5-eb030f7d8e47", + "Liquid_feeding_solvents": "3a160824-0665-01ed-285a-51ef817a9046", + "Liquid_feeding(titration)": "3a16082a-96ac-0449-446a-4ed39f3365b6", + "liquid_feeding_beaker": "3a16087e-124f-8ddb-8ec1-c2dff09ca784", + "Drip_back": "3a162cf9-6aac-565a-ddd7-682ba1796a4a", +} + +# 工作流名称到显示名称的映射 +WORKFLOW_TO_SECTION_MAP = { + 'reactor_taken_in': '反应器放入', + 'reactor_taken_out': '反应器取出', + 'Solid_feeding_vials': '固体投料-小瓶', + 'Liquid_feeding_vials(non-titration)': '液体投料-小瓶(非滴定)', + 'Liquid_feeding_solvents': '液体投料-溶剂', + 'Liquid_feeding(titration)': '液体投料-滴定', + 'liquid_feeding_beaker': '液体投料-烧杯', + 'Drip_back': '液体回滴' +} + +# 工作流步骤ID配置 +WORKFLOW_STEP_IDS = { + "reactor_taken_in": { + "config": "60a06f85-c5b3-29eb-180f-4f62dd7e2154" + }, + "liquid_feeding_beaker": { + "liquid": "6808cda7-fee7-4092-97f0-5f9c2ffa60e3", + "observe": "1753c0de-dffc-4ee6-8458-805a2e227362" + }, + "liquid_feeding_vials_non_titration": { + "liquid": "62ea6e95-3d5d-43db-bc1e-9a1802673861", + "observe": "3a167d99-6172-b67b-5f22-a7892197142e" + }, + "liquid_feeding_solvents": { + "liquid": "1fcea355-2545-462b-b727-350b69a313bf", + "observe": "0553dfb3-9ac5-4ace-8e00-2f11029919a8" + }, + "solid_feeding_vials": { + "feeding": "f7ae7448-4f20-4c1d-8096-df6fbadd787a", + "observe": "263c7ed5-7277-426b-bdff-d6fbf77bcc05" + }, + "liquid_feeding_titration": { + "liquid": "a00ec41b-e666-4422-9c20-bfcd3cd15c54", + "observe": "ac738ff6-4c58-4155-87b1-d6f65a2c9ab5" + }, + "drip_back": { + "liquid": "371be86a-ab77-4769-83e5-54580547c48a", + "observe": "ce024b9d-bd20-47b8-9f78-ca5ce7f44cf1" + } +} + +# 工作流动作名称配置 +ACTION_NAMES = { + "reactor_taken_in": { + "config": "通量-配置", + "stirring": "反应模块-开始搅拌" + }, + "solid_feeding_vials": { + "feeding": "粉末加样模块-投料", + "observe": "反应模块-观察搅拌结果" + }, + "liquid_feeding_vials_non_titration": { + "liquid": "稀释液瓶加液位-液体投料", + "observe": "反应模块-滴定结果观察" + }, + "liquid_feeding_solvents": { + "liquid": "试剂AB放置位-试剂吸液分液", + "observe": "反应模块-观察搅拌结果" + }, + "liquid_feeding_titration": { + "liquid": "稀释液瓶加液位-稀释液吸液分液", + "observe": "反应模块-滴定结果观察" + }, + "liquid_feeding_beaker": { + "liquid": "烧杯溶液放置位-烧杯吸液分液", + "observe": "反应模块-观察搅拌结果" + }, + "drip_back": { + "liquid": "试剂AB放置位-试剂吸液分液", + "observe": "反应模块-向下滴定结果观察" + } +} + +# ============================================================================ +# 仓库配置 +# ============================================================================ +# 说明: +# - 出库和入库操作都需要UUID +WAREHOUSE_MAPPING = { + # ========== 反应站仓库 ========== + + # 堆栈1左 - 反应站左侧堆栈 (4行×4列=16个库位, A01~D04) + "堆栈1左": { + "uuid": "3a14aa17-0d49-dce4-486e-4b5c85c8b366", + "site_uuids": { + "A01": "3a14aa17-0d49-11d7-a6e1-f236b3e5e5a3", + "A02": "3a14aa17-0d49-4bc5-8836-517b75473f5f", + "A03": "3a14aa17-0d49-c2bc-6222-5cee8d2d94f8", + "A04": "3a14aa17-0d49-3ce2-8e9a-008c38d116fb", + "B01": "3a14aa17-0d49-f49c-6b66-b27f185a3b32", + "B02": "3a14aa17-0d49-cf46-df85-a979c9c9920c", + "B03": "3a14aa17-0d49-7698-4a23-f7ffb7d48ba3", + "B04": "3a14aa17-0d49-1231-99be-d5870e6478e9", + "C01": "3a14aa17-0d49-be34-6fae-4aed9d48b70b", + "C02": "3a14aa17-0d49-11d7-0897-34921dcf6b7c", + "C03": "3a14aa17-0d49-9840-0bd5-9c63c1bb2c29", + "C04": "3a14aa17-0d49-8335-3bff-01da69ea4911", + "D01": "3a14aa17-0d49-2bea-c8e5-2b32094935d5", + "D02": "3a14aa17-0d49-cff4-e9e8-5f5f0bc1ef32", + "D03": "3a14aa17-0d49-4948-cb0a-78f30d1ca9b8", + "D04": 
"3a14aa17-0d49-fd2f-9dfb-a29b11e84099", + }, + }, + + # 堆栈1右 - 反应站右侧堆栈 (4行×4列=16个库位, A05~D08) + "堆栈1右": { + "uuid": "3a14aa17-0d49-dce4-486e-4b5c85c8b366", + "site_uuids": { + "A05": "3a14aa17-0d49-2c61-edc8-72a8ca7192dd", + "A06": "3a14aa17-0d49-60c8-2b00-40b17198f397", + "A07": "3a14aa17-0d49-ec5b-0b75-634dce8eed25", + "A08": "3a14aa17-0d49-3ec9-55b3-f3189c4ec53d", + "B05": "3a14aa17-0d49-6a4e-abcf-4c113eaaeaad", + "B06": "3a14aa17-0d49-e3f6-2dd6-28c2e8194fbe", + "B07": "3a14aa17-0d49-11a6-b861-ee895121bf52", + "B08": "3a14aa17-0d49-9c7d-1145-d554a6e482f0", + "C05": "3a14aa17-0d49-45c4-7a34-5105bc3e2368", + "C06": "3a14aa17-0d49-867e-39ab-31b3fe9014be", + "C07": "3a14aa17-0d49-ec56-c4b4-39fd9b2131e7", + "C08": "3a14aa17-0d49-1128-d7d9-ffb1231c98c0", + "D05": "3a14aa17-0d49-e843-f961-ea173326a14b", + "D06": "3a14aa17-0d49-4d26-a985-f188359c4f8b", + "D07": "3a14aa17-0d49-223a-b520-bc092bb42fe0", + "D08": "3a14aa17-0d49-4fa3-401a-6a444e1cca22", + }, + }, + + # 站内试剂存放堆栈 + "站内试剂存放堆栈": { + "uuid": "3a14aa3b-9fab-9d8e-d1a7-828f01f51f0c", + "site_uuids": { + "A01": "3a14aa3b-9fab-adac-7b9c-e1ee446b51d5", + "A02": "3a14aa3b-9fab-ca72-febc-b7c304476c78" + } + }, + + + # 测量小瓶仓库(测密度) + "测量小瓶仓库": { + "uuid": "3a15012f-705b-c0de-3f9e-950c205f9921", + "site_uuids": { + "A01": "3a15012f-705e-0524-3161-c523b5aebc97", + "A02": "3a15012f-705e-7cd1-32ab-ad4fd1ab75c8", + "A03": "3a15012f-705e-a5d6-edac-bdbfec236260", + "B01": "3a15012f-705e-e0ee-80e0-10a6b3fc500d", + "B02": "3a15012f-705e-e499-180d-de06d60d0b21", + "B03": "3a15012f-705e-eff6-63f1-09f742096b26" + } + }, + + # 站内Tip盒堆栈 - 用于存放枪头盒 (耗材) + "站内Tip盒堆栈": { + "uuid": "3a14aa3a-2d3c-b5c1-9ddf-7c4a957d459a", + "site_uuids": { + "A01": "3a14aa3a-2d3d-e700-411a-0ddf85e1f18a", + "A02": "3a14aa3a-2d3d-a7ce-099a-d5632fdafa24", + "A03": "3a14aa3a-2d3d-bdf6-a702-c60b38b08501", + "B01": "3a14aa3a-2d3d-d704-f076-2a8d5bc72cb8", + "B02": "3a14aa3a-2d3d-c350-2526-0778d173a5ac", + "B03": "3a14aa3a-2d3d-bc38-b356-f0de2e44e0c7" + } + }, + # ========== 配液站仓库 ========== + "粉末堆栈": { + "uuid": "3a14198e-6928-121f-7ca6-88ad3ae7e6a0", + "site_uuids": { + "A01": "3a14198e-6929-31f0-8a22-0f98f72260df", + "A02": "3a14198e-6929-4379-affa-9a2935c17f99", + "A03": "3a14198e-6929-56da-9a1c-7f5fbd4ae8af", + "A04": "3a14198e-6929-5e99-2b79-80720f7cfb54", + "B01": "3a14198e-6929-f525-9a1b-1857552b28ee", + "B02": "3a14198e-6929-bf98-0fd5-26e1d68bf62d", + "B03": "3a14198e-6929-2d86-a468-602175a2b5aa", + "B04": "3a14198e-6929-1a98-ae57-e97660c489ad", + "C01": "3a14198e-6929-46fe-841e-03dd753f1e4a", + "C02": "3a14198e-6929-72ac-32ce-9b50245682b8", + "C03": "3a14198e-6929-8a0b-b686-6f4a2955c4e2", + "C04": "3a14198e-6929-a0ec-5f15-c0f9f339f963", + "D01": "3a14198e-6929-1bc9-a9bd-3b7ca66e7f95", + "D02": "3a14198e-6929-3bd8-e6c7-4a9fd93be118", + "D03": "3a14198e-6929-dde1-fc78-34a84b71afdf", + "D04": "3a14198e-6929-7ac8-915a-fea51cb2e884" + } + }, + "溶液堆栈": { + "uuid": "3a14198e-d723-2c13-7d12-50143e190a23", + "site_uuids": { + "A01": "3a14198e-d724-e036-afdc-2ae39a7f3383", + "A02": "3a14198e-d724-d818-6d4f-5725191a24b5", + "A03": "3a14198e-d724-b5bb-adf3-4c5a0da6fb31", + "A04": "3a14198e-d724-d378-d266-2508a224a19f", + "B01": "3a14198e-d724-afa4-fc82-0ac8a9016791", + "B02": "3a14198e-d724-be8a-5e0b-012675e195c6", + "B03": "3a14198e-d724-ab4e-48cb-817c3c146707", + "B04": "3a14198e-d724-f56e-468b-0110a8feb36a", + "C01": "3a14198e-d724-ca48-bb9e-7e85751e55b6", + "C02": "3a14198e-d724-cc1e-5c2c-228a130f40a8", + "C03": "3a14198e-d724-7f18-1853-39d0c62e1d33", + "C04": 
"3a14198e-d724-0cf1-dea9-a1f40fe7e13c", + "D01": "3a14198e-d724-df6d-5e32-5483b3cab583", + "D02": "3a14198e-d724-1e28-c885-574c3df468d0", + "D03": "3a14198e-d724-28a2-a760-baa896f46b66", + "D04": "3a14198e-d724-0ddd-9654-f9352a421de9" + } + }, + "试剂堆栈": { + "uuid": "3a14198c-c2cc-0290-e086-44a428fba248", + "site_uuids": { + "A01": "3a14198c-c2cf-8b40-af28-b467808f1c36", # x=1, y=1, code=0001-0001 + "A02": "3a14198c-c2d0-dc7d-b8d0-e1d88cee3094", # x=1, y=2, code=0001-0002 + "A03": "3a14198c-c2d0-354f-39ad-642e1a72fcb8", # x=1, y=3, code=0001-0003 + "A04": "3a14198c-c2d0-725e-523d-34c037ac2440", # x=1, y=4, code=0001-0004 + "B01": "3a14198c-c2d0-f3e7-871a-e470d144296f", # x=2, y=1, code=0001-0005 + "B02": "3a14198c-c2d0-2070-efc8-44e245f10c6f", # x=2, y=2, code=0001-0006 + "B03": "3a14198c-c2d0-1559-105d-0ea30682cab4", # x=2, y=3, code=0001-0007 + "B04": "3a14198c-c2d0-efce-0939-69ca5a7dfd39" # x=2, y=4, code=0001-0008 + } + } +} + +# ============================================================================ +# 物料类型配置 +# ============================================================================ +# 说明: +# - 格式: PyLabRobot资源类型名称 → Bioyond系统typeId的UUID +# - 这个映射基于 resource.model 属性 (不是显示名称!) +# - UUID为空表示该类型暂未在Bioyond系统中定义 +MATERIAL_TYPE_MAPPINGS = { + # ================================================配液站资源============================================================ + # ==================================================样品=============================================================== + "BIOYOND_PolymerStation_1FlaskCarrier": ("烧杯", "3a14196b-24f2-ca49-9081-0cab8021bf1a"), # 配液站-样品-烧杯 + "BIOYOND_PolymerStation_1BottleCarrier": ("试剂瓶", "3a14196b-8bcf-a460-4f74-23f21ca79e72"), # 配液站-样品-试剂瓶 + "BIOYOND_PolymerStation_6StockCarrier": ("分装板", "3a14196e-5dfe-6e21-0c79-fe2036d052c4"), # 配液站-样品-分装板 + "BIOYOND_PolymerStation_Liquid_Vial": ("10%分装小瓶", "3a14196c-76be-2279-4e22-7310d69aed68"), # 配液站-样品-分装板-第一排小瓶 + "BIOYOND_PolymerStation_Solid_Vial": ("90%分装小瓶", "3a14196c-cdcf-088d-dc7d-5cf38f0ad9ea"), # 配液站-样品-分装板-第二排小瓶 + # ==================================================试剂=============================================================== + "BIOYOND_PolymerStation_8StockCarrier": ("样品板", "3a14196e-b7a0-a5da-1931-35f3000281e9"), # 配液站-试剂-样品板(8孔) + "BIOYOND_PolymerStation_Solid_Stock": ("样品瓶", "3a14196a-cf7d-8aea-48d8-b9662c7dba94"), # 配液站-试剂-样品板-样品瓶 + +} + +# ============================================================================ +# 动态生成的库位UUID映射(从WAREHOUSE_MAPPING中提取) +# ============================================================================ + +LOCATION_MAPPING = {} +for warehouse_name, warehouse_config in WAREHOUSE_MAPPING.items(): + if "site_uuids" in warehouse_config: + LOCATION_MAPPING.update(warehouse_config["site_uuids"]) + +# ============================================================================ +# 物料默认参数配置 +# ============================================================================ +# 说明: +# - 为特定物料名称自动添加默认参数(如密度、分子量、单位等) +# - 格式: 物料名称 → {参数字典} +# - 在创建或更新物料时,会自动合并这些参数到 Parameters 字段 +# - unit: 物料的计量单位(会用于 unit 字段) +# - density/densityUnit: 密度信息(会添加到 Parameters 中) + +MATERIAL_DEFAULT_PARAMETERS = { + # 溶剂类 + "NMP": { + "unit": "毫升", + "density": "1.03", + "densityUnit": "g/mL", + "description": "N-甲基吡咯烷酮 (N-Methyl-2-pyrrolidone)" + }, + # 可以继续添加其他物料... 
+} + +# ============================================================================ +# 物料类型默认参数配置 +# ============================================================================ +# 说明: +# - 为特定物料类型(UUID)自动添加默认参数 +# - 格式: Bioyond类型UUID → {参数字典} +# - 优先级低于按名称匹配的配置 +MATERIAL_TYPE_PARAMETERS = { + # 示例: + # "3a14196b-24f2-ca49-9081-0cab8021bf1a": { # 烧杯 + # "unit": "个" + # } +} diff --git a/unilabos/devices/workstation/bioyond_studio/dispensing_station.py b/unilabos/devices/workstation/bioyond_studio/dispensing_station/dispensing_station.py similarity index 79% rename from unilabos/devices/workstation/bioyond_studio/dispensing_station.py rename to unilabos/devices/workstation/bioyond_studio/dispensing_station/dispensing_station.py index 6d51272..dc48487 100644 --- a/unilabos/devices/workstation/bioyond_studio/dispensing_station.py +++ b/unilabos/devices/workstation/bioyond_studio/dispensing_station/dispensing_station.py @@ -4,7 +4,8 @@ import time from typing import Optional, Dict, Any, List from typing_extensions import TypedDict import requests -from unilabos.devices.workstation.bioyond_studio.config import API_CONFIG +import pint + from unilabos.devices.workstation.bioyond_studio.bioyond_rpc import BioyondException from unilabos.devices.workstation.bioyond_studio.station import BioyondWorkstation @@ -25,13 +26,89 @@ class ComputeExperimentDesignReturn(TypedDict): class BioyondDispensingStation(BioyondWorkstation): def __init__( self, - config, - # 桌子 - deck, - *args, + config: dict = None, + deck=None, + protocol_type=None, **kwargs, - ): - super().__init__(config, deck, *args, **kwargs) + ): + """初始化配液站 + + Args: + config: 配置字典,应包含material_type_mappings等配置 + deck: Deck对象 + protocol_type: 协议类型(由ROS系统传递,此处忽略) + **kwargs: 其他可能的参数 + """ + if config is None: + config = {} + + # 将 kwargs 合并到 config 中 (处理扁平化配置如 api_key) + config.update(kwargs) + + if deck is None and config: + deck = config.get('deck') + + # 🔧 修复: 确保 Deck 上的 warehouses 具有正确的 UUID (必须在 super().__init__ 之前执行,因为父类会触发同步) + # 从配置中读取 warehouse_mapping,并应用到实际的 deck 资源上 + if config and "warehouse_mapping" in config and deck: + warehouse_mapping = config["warehouse_mapping"] + print(f"正在根据配置更新 Deck warehouse UUIDs... (共有 {len(warehouse_mapping)} 个配置)") + + user_deck = deck + # 初始化 warehouses 字典 + if not hasattr(user_deck, "warehouses") or user_deck.warehouses is None: + user_deck.warehouses = {} + + # 1. 尝试从 children 中查找匹配的资源 + for child in user_deck.children: + # 简单判断: 如果名字在 mapping 中,就认为是 warehouse + if child.name in warehouse_mapping: + user_deck.warehouses[child.name] = child + print(f" - 从子资源中找到 warehouse: {child.name}") + + # 2. 如果还是没找到,且 Deck 类有 setup 方法,尝试调用 setup (针对 Deck 对象正确但未初始化的情况) + if not user_deck.warehouses and hasattr(user_deck, "setup"): + print(" - 尝试调用 deck.setup() 初始化仓库...") + try: + user_deck.setup() + # setup 后重新检查 + if hasattr(user_deck, "warehouses") and user_deck.warehouses: + print(f" - setup() 成功,找到 {len(user_deck.warehouses)} 个仓库") + except Exception as e: + print(f" - 调用 setup() 失败: {e}") + + # 3. 
如果仍然为空,可能需要手动创建 (仅针对特定已知的 Deck 类型进行补救,这里暂时只打印警告) + if not user_deck.warehouses: + print(" - ⚠️ 仍然无法找到任何 warehouse 资源!") + + for wh_name, wh_config in warehouse_mapping.items(): + target_uuid = wh_config.get("uuid") + + # 尝试在 deck.warehouses 中查找 + wh_resource = None + if hasattr(user_deck, "warehouses") and wh_name in user_deck.warehouses: + wh_resource = user_deck.warehouses[wh_name] + + # 如果没找到,尝试在所有子资源中查找 + if not wh_resource: + wh_resource = user_deck.get_resource(wh_name) + + if wh_resource: + if target_uuid: + current_uuid = getattr(wh_resource, "uuid", None) + print(f"✅ 更新仓库 '{wh_name}' UUID: {current_uuid} -> {target_uuid}") + + # 动态添加 uuid 属性 + wh_resource.uuid = target_uuid + # 同时也确保 category 正确,避免 graphio 识别错误 + # wh_resource.category = "warehouse" + else: + print(f"⚠️ 仓库 '{wh_name}' 在配置中没有 UUID") + else: + print(f"❌ 在 Deck 中未找到配置的仓库: '{wh_name}'") + + super().__init__(bioyond_config=config, deck=deck) + # self.config = config # self.api_key = config["api_key"] # self.host = config["api_host"] @@ -43,6 +120,41 @@ class BioyondDispensingStation(BioyondWorkstation): # 用于跟踪任务完成状态的字典: {orderCode: {status, order_id, timestamp}} self.order_completion_status = {} + # 初始化 pint 单位注册表 + self.ureg = pint.UnitRegistry() + + # 化合物信息 + self.compound_info = { + "MolWt": { + "MDA": 108.14 * self.ureg.g / self.ureg.mol, + "TDA": 122.16 * self.ureg.g / self.ureg.mol, + "PAPP": 521.62 * self.ureg.g / self.ureg.mol, + "BTDA": 322.23 * self.ureg.g / self.ureg.mol, + "BPDA": 294.22 * self.ureg.g / self.ureg.mol, + "6FAP": 366.26 * self.ureg.g / self.ureg.mol, + "PMDA": 218.12 * self.ureg.g / self.ureg.mol, + "MPDA": 108.14 * self.ureg.g / self.ureg.mol, + "SIDA": 248.51 * self.ureg.g / self.ureg.mol, + "ODA": 200.236 * self.ureg.g / self.ureg.mol, + "4,4'-ODA": 200.236 * self.ureg.g / self.ureg.mol, + "134": 292.34 * self.ureg.g / self.ureg.mol, + }, + "FuncGroup": { + "MDA": "Amine", + "TDA": "Amine", + "PAPP": "Amine", + "BTDA": "Anhydride", + "BPDA": "Anhydride", + "6FAP": "Amine", + "MPDA": "Amine", + "SIDA": "Amine", + "PMDA": "Anhydride", + "ODA": "Amine", + "4,4'-ODA": "Amine", + "134": "Amine", + } + } + def _post_project_api(self, endpoint: str, data: Any) -> Dict[str, Any]: """项目接口通用POST调用 @@ -54,7 +166,7 @@ class BioyondDispensingStation(BioyondWorkstation): dict: 服务端响应,失败时返回 {code:0,message,...} """ request_data = { - "apiKey": API_CONFIG["api_key"], + "apiKey": self.bioyond_config["api_key"], "requestTime": self.hardware_interface.get_current_time_iso8601(), "data": data } @@ -85,7 +197,7 @@ class BioyondDispensingStation(BioyondWorkstation): dict: 服务端响应,失败时返回 {code:0,message,...} """ request_data = { - "apiKey": API_CONFIG["api_key"], + "apiKey": self.bioyond_config["api_key"], "requestTime": self.hardware_interface.get_current_time_iso8601(), "data": data } @@ -118,20 +230,22 @@ class BioyondDispensingStation(BioyondWorkstation): ratio = json.loads(ratio) except Exception: ratio = {} - root = str(Path(__file__).resolve().parents[3]) - if root not in sys.path: - sys.path.append(root) - try: - mod = importlib.import_module("tem.compute") - except Exception as e: - raise BioyondException(f"无法导入计算模块: {e}") try: wp = float(wt_percent) if isinstance(wt_percent, str) else wt_percent mt = float(m_tot) if isinstance(m_tot, str) else m_tot tp = float(titration_percent) if isinstance(titration_percent, str) else titration_percent except Exception as e: raise BioyondException(f"参数解析失败: {e}") - res = mod.generate_experiment_design(ratio=ratio, wt_percent=wp, m_tot=mt, titration_percent=tp) + + # 2. 
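# Minimal pint sketch (standalone, mirrors the MolWt entries above without importing
# project code): this is the unit bookkeeping that _generate_experiment_design relies on.
import pint

ureg = pint.UnitRegistry()
mw_btda = 322.23 * ureg.g / ureg.mol      # same value as compound_info["MolWt"]["BTDA"]
m_solid = 5.0 * ureg.g                    # weighed solid
v_solvent = 20 * ureg.ml                  # dissolution volume

n_btda = m_solid / mw_btda                # amount of substance (mol)
c_btda = n_btda / v_solvent               # concentration (mol/mL)
v_needed = (0.005 * ureg.mol) / c_btda    # volume that delivers 5 mmol

# .magnitude strips the unit when a plain float is needed for the JSON result,
# which is what the design method does before returning its dictionaries.
print(n_btda.to(ureg.mmol), c_btda.to(ureg.mol / ureg.L), round(v_needed.magnitude, 2))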
调用内部计算方法 + res = self._generate_experiment_design( + ratio=ratio, + wt_percent=wp, + m_tot=mt, + titration_percent=tp + ) + + # 3. 构造返回结果 out = { "solutions": res.get("solutions", []), "titration": res.get("titration", {}), @@ -140,11 +254,248 @@ class BioyondDispensingStation(BioyondWorkstation): "return_info": json.dumps(res, ensure_ascii=False) } return out + except BioyondException: raise except Exception as e: raise BioyondException(str(e)) + def _generate_experiment_design( + self, + ratio: dict, + wt_percent: float = 0.25, + m_tot: float = 70, + titration_percent: float = 0.03, + ) -> dict: + """内部方法:生成实验设计 + + 根据FuncGroup自动区分二胺和二酐,每种二胺单独配溶液,严格按照ratio顺序投料。 + + 参数: + ratio: 化合物配比字典,格式: {"compound_name": ratio_value} + wt_percent: 固体重量百分比 + m_tot: 反应混合物总质量(g) + titration_percent: 滴定溶液百分比 + + 返回: + 包含实验设计详细参数的字典 + """ + # 溶剂密度 + ρ_solvent = 1.03 * self.ureg.g / self.ureg.ml + # 二酐溶解度 + solubility = 0.02 * self.ureg.g / self.ureg.ml + # 投入固体时最小溶剂体积 + V_min = 30 * self.ureg.ml + m_tot = m_tot * self.ureg.g + + # 保持ratio中的顺序 + compound_names = list(ratio.keys()) + compound_ratios = list(ratio.values()) + + # 验证所有化合物是否在 compound_info 中定义 + undefined_compounds = [name for name in compound_names if name not in self.compound_info["MolWt"]] + if undefined_compounds: + available = list(self.compound_info["MolWt"].keys()) + raise ValueError( + f"以下化合物未在 compound_info 中定义: {undefined_compounds}。" + f"可用的化合物: {available}" + ) + + # 获取各化合物的分子量和官能团类型 + molecular_weights = [self.compound_info["MolWt"][name] for name in compound_names] + func_groups = [self.compound_info["FuncGroup"][name] for name in compound_names] + + # 记录化合物信息用于调试 + self.hardware_interface._logger.info(f"化合物名称: {compound_names}") + self.hardware_interface._logger.info(f"官能团类型: {func_groups}") + + # 按原始顺序分离二胺和二酐 + ordered_compounds = list(zip(compound_names, compound_ratios, molecular_weights, func_groups)) + diamine_compounds = [(name, ratio_val, mw, i) for i, (name, ratio_val, mw, fg) in enumerate(ordered_compounds) if fg == "Amine"] + anhydride_compounds = [(name, ratio_val, mw, i) for i, (name, ratio_val, mw, fg) in enumerate(ordered_compounds) if fg == "Anhydride"] + + if not diamine_compounds or not anhydride_compounds: + raise ValueError( + f"需要同时包含二胺(Amine)和二酐(Anhydride)化合物。" + f"当前二胺: {[c[0] for c in diamine_compounds]}, " + f"当前二酐: {[c[0] for c in anhydride_compounds]}" + ) + + # 计算加权平均分子量 (基于摩尔比) + total_molar_ratio = sum(compound_ratios) + weighted_molecular_weight = sum(ratio_val * mw for ratio_val, mw in zip(compound_ratios, molecular_weights)) + + # 取最后一个二酐用于滴定 + titration_anhydride = anhydride_compounds[-1] + solid_anhydrides = anhydride_compounds[:-1] if len(anhydride_compounds) > 1 else [] + + # 二胺溶液配制参数 - 每种二胺单独配制 + diamine_solutions = [] + total_diamine_volume = 0 * self.ureg.ml + + # 计算反应物的总摩尔量 + n_reactant = m_tot * wt_percent / weighted_molecular_weight + + for name, ratio_val, mw, order_index in diamine_compounds: + # 跳过 SIDA + if name == "SIDA": + continue + + # 计算该二胺需要的摩尔数 + n_diamine_needed = n_reactant * ratio_val + + # 二胺溶液配制参数 (每种二胺固定配制参数) + m_diamine_solid = 5.0 * self.ureg.g # 每种二胺固体质量 + V_solvent_for_this = 20 * self.ureg.ml # 每种二胺溶剂体积 + m_solvent_for_this = ρ_solvent * V_solvent_for_this + + # 计算该二胺溶液的浓度 + c_diamine = (m_diamine_solid / mw) / V_solvent_for_this + + # 计算需要移取的溶液体积 + V_diamine_needed = n_diamine_needed / c_diamine + + diamine_solutions.append({ + "name": name, + "order": order_index, + "solid_mass": m_diamine_solid.magnitude, + "solvent_volume": V_solvent_for_this.magnitude, + 
"concentration": c_diamine.magnitude, + "volume_needed": V_diamine_needed.magnitude, + "molar_ratio": ratio_val + }) + + total_diamine_volume += V_diamine_needed + + # 按原始顺序排序 + diamine_solutions.sort(key=lambda x: x["order"]) + + # 计算滴定二酐的质量 + titration_name, titration_ratio, titration_mw, _ = titration_anhydride + m_titration_anhydride = n_reactant * titration_ratio * titration_mw + m_titration_90 = m_titration_anhydride * (1 - titration_percent) + m_titration_10 = m_titration_anhydride * titration_percent + + # 计算其他固体二酐的质量 (按顺序) + solid_anhydride_masses = [] + for name, ratio_val, mw, order_index in solid_anhydrides: + mass = n_reactant * ratio_val * mw + solid_anhydride_masses.append({ + "name": name, + "order": order_index, + "mass": mass.magnitude, + "molar_ratio": ratio_val + }) + + # 按原始顺序排序 + solid_anhydride_masses.sort(key=lambda x: x["order"]) + + # 计算溶剂用量 + total_diamine_solution_mass = sum( + sol["volume_needed"] * ρ_solvent for sol in diamine_solutions + ) * self.ureg.ml + + # 预估滴定溶剂量、计算补加溶剂量 + m_solvent_titration = m_titration_10 / solubility * ρ_solvent + m_solvent_add = m_tot * (1 - wt_percent) - total_diamine_solution_mass - m_solvent_titration + + # 检查最小溶剂体积要求 + total_liquid_volume = (total_diamine_solution_mass + m_solvent_add) / ρ_solvent + m_tot_min = V_min / total_liquid_volume * m_tot + + # 如果需要,按比例放大 + scale_factor = 1.0 + if m_tot_min > m_tot: + scale_factor = (m_tot_min / m_tot).magnitude + m_titration_90 *= scale_factor + m_titration_10 *= scale_factor + m_solvent_add *= scale_factor + m_solvent_titration *= scale_factor + + # 更新二胺溶液用量 + for sol in diamine_solutions: + sol["volume_needed"] *= scale_factor + + # 更新固体二酐用量 + for anhydride in solid_anhydride_masses: + anhydride["mass"] *= scale_factor + + m_tot = m_tot_min + + # 生成投料顺序 + feeding_order = [] + + # 1. 固体二酐 (按顺序) + for anhydride in solid_anhydride_masses: + feeding_order.append({ + "step": len(feeding_order) + 1, + "type": "solid_anhydride", + "name": anhydride["name"], + "amount": anhydride["mass"], + "order": anhydride["order"] + }) + + # 2. 二胺溶液 (按顺序) + for sol in diamine_solutions: + feeding_order.append({ + "step": len(feeding_order) + 1, + "type": "diamine_solution", + "name": sol["name"], + "amount": sol["volume_needed"], + "order": sol["order"] + }) + + # 3. 主要二酐粉末 + feeding_order.append({ + "step": len(feeding_order) + 1, + "type": "main_anhydride", + "name": titration_name, + "amount": m_titration_90.magnitude, + "order": titration_anhydride[3] + }) + + # 4. 补加溶剂 + if m_solvent_add > 0: + feeding_order.append({ + "step": len(feeding_order) + 1, + "type": "additional_solvent", + "name": "溶剂", + "amount": m_solvent_add.magnitude, + "order": 999 + }) + + # 5. 
滴定二酐溶液 + feeding_order.append({ + "step": len(feeding_order) + 1, + "type": "titration_anhydride", + "name": f"{titration_name} 滴定液", + "amount": m_titration_10.magnitude, + "titration_solvent": m_solvent_titration.magnitude, + "order": titration_anhydride[3] + }) + + # 返回实验设计结果 + results = { + "total_mass": m_tot.magnitude, + "scale_factor": scale_factor, + "solutions": diamine_solutions, + "solids": solid_anhydride_masses, + "titration": { + "name": titration_name, + "main_portion": m_titration_90.magnitude, + "titration_portion": m_titration_10.magnitude, + "titration_solvent": m_solvent_titration.magnitude, + }, + "solvents": { + "additional_solvent": m_solvent_add.magnitude, + "total_liquid_volume": total_liquid_volume.magnitude + }, + "feeding_order": feeding_order, + "minimum_required_mass": m_tot_min.magnitude + } + + return results + # 90%10%小瓶投料任务创建方法 def create_90_10_vial_feeding_task(self, order_name: str = None, @@ -961,6 +1312,108 @@ class BioyondDispensingStation(BioyondWorkstation): 'actualVolume': actual_volume } + def _simplify_report(self, report) -> Dict[str, Any]: + """简化实验报告,只保留关键信息,去除冗余的工作流参数""" + if not isinstance(report, dict): + return report + + data = report.get('data', {}) + if not isinstance(data, dict): + return report + + # 提取关键信息 + simplified = { + 'name': data.get('name'), + 'code': data.get('code'), + 'requester': data.get('requester'), + 'workflowName': data.get('workflowName'), + 'workflowStep': data.get('workflowStep'), + 'requestTime': data.get('requestTime'), + 'startPreparationTime': data.get('startPreparationTime'), + 'completeTime': data.get('completeTime'), + 'useTime': data.get('useTime'), + 'status': data.get('status'), + 'statusName': data.get('statusName'), + } + + # 提取物料信息(简化版) + pre_intakes = data.get('preIntakes', []) + if pre_intakes and isinstance(pre_intakes, list): + first_intake = pre_intakes[0] + sample_materials = first_intake.get('sampleMaterials', []) + + # 简化物料信息 + simplified_materials = [] + for material in sample_materials: + if isinstance(material, dict): + mat_info = { + 'materialName': material.get('materialName'), + 'materialTypeName': material.get('materialTypeName'), + 'materialCode': material.get('materialCode'), + 'materialLocation': material.get('materialLocation'), + } + + # 解析parameters中的关键信息(如密度、加料历史等) + params_str = material.get('parameters', '{}') + try: + params = json.loads(params_str) if isinstance(params_str, str) else params_str + if isinstance(params, dict): + # 只保留关键参数 + if 'density' in params: + mat_info['density'] = params['density'] + if 'feedingHistory' in params: + mat_info['feedingHistory'] = params['feedingHistory'] + if 'liquidVolume' in params: + mat_info['liquidVolume'] = params['liquidVolume'] + if 'm_diamine_tot' in params: + mat_info['m_diamine_tot'] = params['m_diamine_tot'] + if 'wt_diamine' in params: + mat_info['wt_diamine'] = params['wt_diamine'] + except: + pass + + simplified_materials.append(mat_info) + + simplified['sampleMaterials'] = simplified_materials + + # 提取extraProperties中的实际值 + extra_props = first_intake.get('extraProperties', {}) + if isinstance(extra_props, dict): + simplified_extra = {} + for key, value in extra_props.items(): + try: + parsed_value = json.loads(value) if isinstance(value, str) else value + simplified_extra[key] = parsed_value + except: + simplified_extra[key] = value + simplified['extraProperties'] = simplified_extra + + return { + 'data': simplified, + 'code': report.get('code'), + 'message': report.get('message'), + 'timestamp': report.get('timestamp') + 
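# _simplify_report above is duplicated verbatim in reaction_station.py further down;
# the parameters-parsing step could live in one shared helper. Sketch (hypothetical
# name, narrows the bare except to the errors json.loads can actually raise):
def extract_key_parameters(raw_parameters, keys=("density", "feedingHistory",
                                                 "liquidVolume", "m_diamine_tot",
                                                 "wt_diamine")) -> dict:
    """Parse a material's `parameters` JSON string and keep only the listed keys."""
    try:
        params = json.loads(raw_parameters) if isinstance(raw_parameters, str) else raw_parameters
    except (json.JSONDecodeError, TypeError):
        return {}
    if not isinstance(params, dict):
        return {}
    return {k: params[k] for k in keys if k in params}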
} + + def scheduler_start(self) -> dict: + """启动调度器 - 启动Bioyond工作站的任务调度器,开始执行队列中的任务 + + Returns: + dict: 包含return_info的字典,return_info为整型(1=成功) + + Raises: + BioyondException: 调度器启动失败时抛出异常 + """ + result = self.hardware_interface.scheduler_start() + self.hardware_interface._logger.info(f"调度器启动结果: {result}") + + if result != 1: + error_msg = "启动调度器失败: 有未处理错误,调度无法启动。请检查Bioyond系统状态。" + self.hardware_interface._logger.error(error_msg) + raise BioyondException(error_msg) + + return {"return_info": result} + # 等待多个任务完成并获取实验报告 def wait_for_multiple_orders_and_get_reports(self, batch_create_result: str = None, @@ -1002,7 +1455,12 @@ class BioyondDispensingStation(BioyondWorkstation): # 验证batch_create_result参数 if not batch_create_result or batch_create_result == "": - raise BioyondException("batch_create_result参数为空,请确保从batch_create节点正确连接handle") + raise BioyondException( + "batch_create_result参数为空,请确保:\n" + "1. batch_create节点与wait节点之间正确连接了handle\n" + "2. batch_create节点成功执行并返回了结果\n" + "3. 检查上游batch_create任务是否成功创建了订单" + ) # 解析batch_create_result JSON对象 try: @@ -1031,7 +1489,17 @@ class BioyondDispensingStation(BioyondWorkstation): # 验证提取的数据 if not order_codes: - raise BioyondException("batch_create_result中未找到order_codes字段或为空") + self.hardware_interface._logger.error( + f"batch_create任务未生成任何订单。batch_create_result内容: {batch_create_result}" + ) + raise BioyondException( + "batch_create_result中未找到order_codes或为空。\n" + "可能的原因:\n" + "1. batch_create任务执行失败(检查任务是否报错)\n" + "2. 物料配置问题(如'物料样品板分配失败')\n" + "3. Bioyond系统状态异常\n" + f"请检查batch_create任务的执行结果" + ) if not order_ids: raise BioyondException("batch_create_result中未找到order_ids字段或为空") @@ -1114,6 +1582,8 @@ class BioyondDispensingStation(BioyondWorkstation): self.hardware_interface._logger.info( f"成功获取任务 {order_code} 的实验报告" ) + # 简化报告,去除冗余信息 + report = self._simplify_report(report) reports.append({ "order_code": order_code, @@ -1288,7 +1758,7 @@ class BioyondDispensingStation(BioyondWorkstation): f"开始执行批量物料转移: {len(transfer_groups)}组任务 -> {target_device_id}" ) - from .config import WAREHOUSE_MAPPING + warehouse_mapping = self.bioyond_config.get("warehouse_mapping", {}) results = [] successful_count = 0 failed_count = 0 diff --git a/unilabos/devices/workstation/bioyond_studio/reaction_station.py b/unilabos/devices/workstation/bioyond_studio/reaction_station/reaction_station.py similarity index 55% rename from unilabos/devices/workstation/bioyond_studio/reaction_station.py rename to unilabos/devices/workstation/bioyond_studio/reaction_station/reaction_station.py index ffb83fd..c7f3194 100644 --- a/unilabos/devices/workstation/bioyond_studio/reaction_station.py +++ b/unilabos/devices/workstation/bioyond_studio/reaction_station/reaction_station.py @@ -2,17 +2,15 @@ import json import time import requests from typing import List, Dict, Any +import json +import requests from pathlib import Path from datetime import datetime from unilabos.devices.workstation.bioyond_studio.station import BioyondWorkstation from unilabos.devices.workstation.bioyond_studio.bioyond_rpc import MachineState from unilabos.ros.msgs.message_converter import convert_to_ros_msg, Float64, String -from unilabos.devices.workstation.bioyond_studio.config import ( - WORKFLOW_STEP_IDS, - WORKFLOW_TO_SECTION_MAP, - ACTION_NAMES -) -from unilabos.devices.workstation.bioyond_studio.config import API_CONFIG + + class BioyondReactor: @@ -49,21 +47,84 @@ class BioyondReactor: class BioyondReactionStation(BioyondWorkstation): """Bioyond反应站类 - 继承自BioyondWorkstation,提供反应站特定的业务方法 + 
继承自BioyondWorkstation,提供反应站特定的业务方法 """ def __init__(self, config: dict = None, deck=None, protocol_type=None, **kwargs): """初始化反应站 Args: - config: 配置字典,应包含workflow_mappings等配置 + config: 配置字典,应包含workflow_mappings等配置 deck: Deck对象 - protocol_type: 协议类型(由ROS系统传递,此处忽略) + protocol_type: 协议类型(由ROS系统传递,此处忽略) **kwargs: 其他可能的参数 """ + if config is None: + config = {} + + # 将 kwargs 合并到 config 中 (处理扁平化配置如 api_key) + config.update(kwargs) + if deck is None and config: deck = config.get('deck') + # 🔧 修复: 确保 Deck 上的 warehouses 具有正确的 UUID (必须在 super().__init__ 之前执行,因为父类会触发同步) + # 从配置中读取 warehouse_mapping,并应用到实际的 deck 资源上 + if config and "warehouse_mapping" in config and deck: + warehouse_mapping = config["warehouse_mapping"] + print(f"正在根据配置更新 Deck warehouse UUIDs... (共有 {len(warehouse_mapping)} 个配置)") + + user_deck = deck + # 初始化 warehouses 字典 + if not hasattr(user_deck, "warehouses") or user_deck.warehouses is None: + user_deck.warehouses = {} + + # 1. 尝试从 children 中查找匹配的资源 + for child in user_deck.children: + # 简单判断: 如果名字在 mapping 中,就认为是 warehouse + if child.name in warehouse_mapping: + user_deck.warehouses[child.name] = child + print(f" - 从子资源中找到 warehouse: {child.name}") + + # 2. 如果还是没找到,且 Deck 类有 setup 方法,尝试调用 setup (针对 Deck 对象正确但未初始化的情况) + if not user_deck.warehouses and hasattr(user_deck, "setup"): + print(" - 尝试调用 deck.setup() 初始化仓库...") + try: + user_deck.setup() + # setup 后重新检查 + if hasattr(user_deck, "warehouses") and user_deck.warehouses: + print(f" - setup() 成功,找到 {len(user_deck.warehouses)} 个仓库") + except Exception as e: + print(f" - 调用 setup() 失败: {e}") + + # 3. 如果仍然为空,可能需要手动创建 (仅针对特定已知的 Deck 类型进行补救,这里暂时只打印警告) + if not user_deck.warehouses: + print(" - ⚠️ 仍然无法找到任何 warehouse 资源!") + + for wh_name, wh_config in warehouse_mapping.items(): + target_uuid = wh_config.get("uuid") + + # 尝试在 deck.warehouses 中查找 + wh_resource = None + if hasattr(user_deck, "warehouses") and wh_name in user_deck.warehouses: + wh_resource = user_deck.warehouses[wh_name] + + # 如果没找到,尝试在所有子资源中查找 + if not wh_resource: + wh_resource = user_deck.get_resource(wh_name) + + if wh_resource: + if target_uuid: + current_uuid = getattr(wh_resource, "uuid", None) + print(f"✅ 更新仓库 '{wh_name}' UUID: {current_uuid} -> {target_uuid}") + wh_resource.uuid = target_uuid + else: + print(f"⚠️ 仓库 '{wh_name}' 在配置中没有 UUID") + else: + print(f"❌ 在 Deck 中未找到配置的仓库: '{wh_name}'") + + super().__init__(bioyond_config=config, deck=deck) + print(f"BioyondReactionStation初始化 - config包含workflow_mappings: {'workflow_mappings' in (config or {})}") if config and 'workflow_mappings' in config: print(f"workflow_mappings内容: {config['workflow_mappings']}") @@ -86,6 +147,147 @@ class BioyondReactionStation(BioyondWorkstation): self._frame_to_reactor_id = {1: "reactor_1", 2: "reactor_2", 3: "reactor_3", 4: "reactor_4", 5: "reactor_5"} + # 用于缓存从 Bioyond 查询的工作流序列 + self._cached_workflow_sequence = [] + # 用于缓存待处理的时间约束 + self.pending_time_constraints = [] + + # 从配置中获取 action_names + self.action_names = self.bioyond_config.get("action_names", {}) + + # 动态获取工作流步骤ID + self.workflow_step_ids = self._fetch_workflow_step_ids() + + def _fetch_workflow_step_ids(self) -> Dict[str, Dict[str, str]]: + """动态获取工作流步骤ID""" + print("正在从LIMS获取最新工作流步骤ID...") + + api_host = self.bioyond_config.get("api_host") + api_key = self.bioyond_config.get("api_key") + + if not api_host or not api_key: + print("API配置缺失,无法动态获取工作流步骤ID") + return {} + + def call_api(endpoint, data=None): + url = f"{api_host}{endpoint}" + payload = { + "apiKey": api_key, + "requestTime": datetime.now().isoformat(), + 
"data": data if data else {} + } + try: + response = requests.post(url, json=payload, headers={"Content-Type": "application/json"}, timeout=5) + return response.json() + except Exception as e: + print(f"调用API {endpoint} 失败: {e}") + return None + + # 1. 获取工作流列表 + resp = call_api("/api/lims/workflow/work-flow-list", {"type": 2, "includeDetail": True}) + if not resp: + print("无法获取工作流列表") + return {} + + workflows = resp.get("data", []) + if isinstance(workflows, dict): + if "list" in workflows: + workflows = workflows["list"] + elif "items" in workflows: + workflows = workflows["items"] + + if not workflows: + print("工作流列表为空") + return {} + + new_ids = {} + + #从配置中获取workflow_to_section_map + workflow_to_section_map = self.bioyond_config.get("workflow_to_section_map", {}) + + # 2. 遍历映射表 + for internal_name, section_name in workflow_to_section_map.items(): + # 查找对应的工作流对象 + wf_obj = next((w for w in workflows if w.get("name") == section_name), None) + if not wf_obj: + # print(f"未找到工作流: {section_name}") + continue + + # 获取 subWorkflowId + sub_wf_id = None + if wf_obj.get("subWorkflows"): + sub_wfs = wf_obj.get("subWorkflows") + if len(sub_wfs) > 0: + sub_wf_id = sub_wfs[0].get("id") + + if not sub_wf_id: + # print(f"工作流 {section_name} 没有子工作流ID") + continue + + # 3. 获取步骤参数 + step_resp = call_api("/api/lims/workflow/sub-workflow-step-parameters", sub_wf_id) + if not step_resp or not step_resp.get("data"): + # print(f"无法获取工作流 {section_name} 的步骤参数") + continue + + steps_data = step_resp.get("data", {}) + step_name_to_id = {} + + if isinstance(steps_data, dict): + for s_id, step_list in steps_data.items(): + if isinstance(step_list, list): + for step in step_list: + s_name = step.get("name") + if s_name: + step_name_to_id[s_name] = s_id + + # 4. 匹配 ACTION_NAMES + target_key = internal_name + normalized_key = internal_name.lower().replace('(', '_').replace(')', '').replace('-', '_') + + if internal_name in self.action_names: + target_key = internal_name + elif normalized_key in self.action_names: + target_key = normalized_key + elif internal_name.lower() in self.action_names: + target_key = internal_name.lower() + + if target_key in self.action_names: + new_ids[target_key] = {} + for key, action_display_name in self.action_names[target_key].items(): + step_id = step_name_to_id.get(action_display_name) + if step_id: + new_ids[target_key][key] = step_id + else: + print(f"警告: 工作流 '{section_name}' 中未找到步骤 '{action_display_name}'") + + if not new_ids: + print("未能获取任何新的步骤ID,使用默认配置") + return self.bioyond_config.get("workflow_step_ids", {}) + + print("成功更新工作流步骤ID") + return new_ids + + + @property + def workflow_sequence(self) -> str: + """工作流序列属性 - 返回初始化时查询的工作流列表 + + Returns: + str: 工作流信息的 JSON 字符串 + """ + import json + return json.dumps(self._cached_workflow_sequence, ensure_ascii=False) + + @workflow_sequence.setter + def workflow_sequence(self, value: List[str]): + """设置工作流序列 + + Args: + value: 工作流 ID 列表 + """ + self._cached_workflow_sequence = value + # ==================== 工作流方法 ==================== def reactor_taken_out(self): @@ -97,6 +299,27 @@ class BioyondReactionStation(BioyondWorkstation): print(f"当前队列长度: {len(self.pending_task_params)}") return json.dumps({"suc": True}) + def scheduler_start(self) -> dict: + """启动调度器 - 启动Bioyond工作站的任务调度器,开始执行队列中的任务 + + Returns: + dict: 包含return_info的字典,return_info为整型(1=成功) + + Raises: + BioyondException: 调度器启动失败时抛出异常 + """ + from unilabos.devices.workstation.bioyond_studio.bioyond_rpc import BioyondException + + result = self.hardware_interface.scheduler_start() 
+ self.hardware_interface._logger.info(f"调度器启动结果: {result}") + + if result != 1: + error_msg = "启动调度器失败: 有未处理错误,调度无法启动。请检查Bioyond系统状态。" + self.hardware_interface._logger.error(error_msg) + raise BioyondException(error_msg) + + return {"return_info": result} + def reactor_taken_in( self, assign_material_name: str, @@ -106,12 +329,12 @@ class BioyondReactionStation(BioyondWorkstation): """反应器放入 Args: - assign_material_name: 物料名称(不能为空) - cutoff: 粘度上限(需为有效数字字符串,默认 "900000") - temperature: 温度设定(°C,范围:-50.00 至 100.00) + assign_material_name: 物料名称(不能为空) + cutoff: 粘度上限(需为有效数字字符串,默认 "900000") + temperature: 温度设定(C,范围:-50.00 至 100.00) Returns: - str: JSON 字符串,格式为 {"suc": True} + str: JSON 字符串,格式为 {"suc": True} Raises: ValueError: 若物料名称无效或 cutoff 格式错误 @@ -131,15 +354,16 @@ class BioyondReactionStation(BioyondWorkstation): if isinstance(temperature, str): temperature = float(temperature) - step_id = WORKFLOW_STEP_IDS["reactor_taken_in"]["config"] + + step_id = self.workflow_step_ids["reactor_taken_in"]["config"] reactor_taken_in_params = { "param_values": { step_id: { - ACTION_NAMES["reactor_taken_in"]["config"]: [ + self.action_names["reactor_taken_in"]["config"]: [ {"m": 0, "n": 3, "Key": "cutoff", "Value": cutoff}, {"m": 0, "n": 3, "Key": "assignMaterialName", "Value": material_id} ], - ACTION_NAMES["reactor_taken_in"]["stirring"]: [ + self.action_names["reactor_taken_in"]["stirring"]: [ {"m": 0, "n": 3, "Key": "temperature", "Value": f"{temperature:.2f}"} ] } @@ -162,33 +386,40 @@ class BioyondReactionStation(BioyondWorkstation): """固体进料小瓶 Args: - material_id: 粉末类型ID,1=盐(21分钟),2=面粉(27分钟),3=BTDA(38分钟) + material_id: 粉末类型ID, Salt=1, Flour=2, BTDA=3 time: 观察时间(分钟) - torque_variation: 是否观察(int类型, 1=否, 2=是) + torque_variation: 是否观察(NO=1, YES=2) assign_material_name: 物料名称(用于获取试剂瓶位ID) - temperature: 温度设定(°C) + temperature: 温度设定(C) """ + # 参数映射 + material_map = {"Salt": "1", "Flour": "2", "BTDA": "3", "1": "1", "2": "2", "3": "3"} + torque_map = {"NO": "1", "YES": "2", 1: "1", 2: "2", "1": "1", "2": "2"} + + mapped_material_id = material_map.get(str(material_id), str(material_id)) + mapped_torque_variation = int(torque_map.get(str(torque_variation), "1")) + self.append_to_workflow_sequence('{"web_workflow_name": "Solid_feeding_vials"}') material_id_m = self.hardware_interface._get_material_id_by_name(assign_material_name) if assign_material_name else None if isinstance(temperature, str): temperature = float(temperature) - feeding_step_id = WORKFLOW_STEP_IDS["solid_feeding_vials"]["feeding"] - observe_step_id = WORKFLOW_STEP_IDS["solid_feeding_vials"]["observe"] + feeding_step_id = self.workflow_step_ids["solid_feeding_vials"]["feeding"] + observe_step_id = self.workflow_step_ids["solid_feeding_vials"]["observe"] solid_feeding_vials_params = { "param_values": { feeding_step_id: { - ACTION_NAMES["solid_feeding_vials"]["feeding"]: [ - {"m": 0, "n": 3, "Key": "materialId", "Value": material_id}, + self.action_names["solid_feeding_vials"]["feeding"]: [ + {"m": 0, "n": 3, "Key": "materialId", "Value": mapped_material_id}, {"m": 0, "n": 3, "Key": "assignMaterialName", "Value": material_id_m} if material_id_m else {} ] }, observe_step_id: { - ACTION_NAMES["solid_feeding_vials"]["observe"]: [ + self.action_names["solid_feeding_vials"]["observe"]: [ {"m": 1, "n": 0, "Key": "time", "Value": time}, - {"m": 1, "n": 0, "Key": "torqueVariation", "Value": str(torque_variation)}, + {"m": 1, "n": 0, "Key": "torqueVariation", "Value": str(mapped_torque_variation)}, {"m": 1, "n": 0, "Key": "temperature", "Value": 
f"{temperature:.2f}"} ] } @@ -196,7 +427,7 @@ class BioyondReactionStation(BioyondWorkstation): } self.pending_task_params.append(solid_feeding_vials_params) - print(f"成功添加固体进料小瓶参数: material_id={material_id}, time={time}min, torque={torque_variation}, temp={temperature:.2f}°C") + print(f"成功添加固体进料小瓶参数: material_id={material_id}, time={time}min, torque={torque_variation}, temp={temperature:.2f}C") print(f"当前队列长度: {len(self.pending_task_params)}") return json.dumps({"suc": True}) @@ -214,11 +445,18 @@ class BioyondReactionStation(BioyondWorkstation): Args: volume_formula: 分液公式(μL) assign_material_name: 物料名称 - titration_type: 是否滴定(1=否, 2=是) + titration_type: 是否滴定(NO=1, YES=2) time: 观察时间(分钟) - torque_variation: 是否观察(int类型, 1=否, 2=是) - temperature: 温度(°C) + torque_variation: 是否观察(NO=1, YES=2) + temperature: 温度(C) """ + # 参数映射 + titration_map = {"NO": "1", "YES": "2", "1": "1", "2": "2"} + torque_map = {"NO": "1", "YES": "2", 1: "1", 2: "2", "1": "1", "2": "2"} + + mapped_titration_type = titration_map.get(str(titration_type), "1") + mapped_torque_variation = int(torque_map.get(str(torque_variation), "1")) + self.append_to_workflow_sequence('{"web_workflow_name": "Liquid_feeding_vials(non-titration)"}') material_id = self.hardware_interface._get_material_id_by_name(assign_material_name) if material_id is None: @@ -227,22 +465,22 @@ class BioyondReactionStation(BioyondWorkstation): if isinstance(temperature, str): temperature = float(temperature) - liquid_step_id = WORKFLOW_STEP_IDS["liquid_feeding_vials_non_titration"]["liquid"] - observe_step_id = WORKFLOW_STEP_IDS["liquid_feeding_vials_non_titration"]["observe"] + liquid_step_id = self.workflow_step_ids["liquid_feeding_vials_non_titration"]["liquid"] + observe_step_id = self.workflow_step_ids["liquid_feeding_vials_non_titration"]["observe"] params = { "param_values": { liquid_step_id: { - ACTION_NAMES["liquid_feeding_vials_non_titration"]["liquid"]: [ + self.action_names["liquid_feeding_vials_non_titration"]["liquid"]: [ {"m": 0, "n": 3, "Key": "volumeFormula", "Value": volume_formula}, {"m": 0, "n": 3, "Key": "assignMaterialName", "Value": material_id}, - {"m": 0, "n": 3, "Key": "titrationType", "Value": titration_type} + {"m": 0, "n": 3, "Key": "titrationType", "Value": mapped_titration_type} ] }, observe_step_id: { - ACTION_NAMES["liquid_feeding_vials_non_titration"]["observe"]: [ + self.action_names["liquid_feeding_vials_non_titration"]["observe"]: [ {"m": 1, "n": 0, "Key": "time", "Value": time}, - {"m": 1, "n": 0, "Key": "torqueVariation", "Value": str(torque_variation)}, + {"m": 1, "n": 0, "Key": "torqueVariation", "Value": str(mapped_torque_variation)}, {"m": 1, "n": 0, "Key": "temperature", "Value": f"{temperature:.2f}"} ] } @@ -275,11 +513,18 @@ class BioyondReactionStation(BioyondWorkstation): "total_liquid_volume": 48.00916988195499 } 如果提供solvents,则从中提取additional_solvent并转换为μL - titration_type: 是否滴定(1=否, 2=是) + titration_type: 是否滴定(NO=1, YES=2) time: 观察时间(分钟) - torque_variation: 是否观察(int类型, 1=否, 2=是) - temperature: 温度设定(°C) + torque_variation: 是否观察(NO=1, YES=2) + temperature: 温度设定(C) """ + # 参数映射 + titration_map = {"NO": "1", "YES": "2", "1": "1", "2": "2"} + torque_map = {"NO": "1", "YES": "2", 1: "1", 2: "2", "1": "1", "2": "2"} + + mapped_titration_type = titration_map.get(str(titration_type), "1") + mapped_torque_variation = int(torque_map.get(str(torque_variation), "1")) + # 处理 volume 参数:优先使用直接传入的 volume,否则从 solvents 中提取 if not volume and solvents is not None: # 参数类型转换:如果是字符串则解析为字典 @@ -311,22 +556,22 @@ class 
BioyondReactionStation(BioyondWorkstation): if isinstance(temperature, str): temperature = float(temperature) - liquid_step_id = WORKFLOW_STEP_IDS["liquid_feeding_solvents"]["liquid"] - observe_step_id = WORKFLOW_STEP_IDS["liquid_feeding_solvents"]["observe"] + liquid_step_id = self.workflow_step_ids["liquid_feeding_solvents"]["liquid"] + observe_step_id = self.workflow_step_ids["liquid_feeding_solvents"]["observe"] params = { "param_values": { liquid_step_id: { - ACTION_NAMES["liquid_feeding_solvents"]["liquid"]: [ - {"m": 0, "n": 1, "Key": "titrationType", "Value": titration_type}, + self.action_names["liquid_feeding_solvents"]["liquid"]: [ + {"m": 0, "n": 1, "Key": "titrationType", "Value": mapped_titration_type}, {"m": 0, "n": 1, "Key": "volume", "Value": volume}, {"m": 0, "n": 1, "Key": "assignMaterialName", "Value": material_id} ] }, observe_step_id: { - ACTION_NAMES["liquid_feeding_solvents"]["observe"]: [ + self.action_names["liquid_feeding_solvents"]["observe"]: [ {"m": 1, "n": 0, "Key": "time", "Value": time}, - {"m": 1, "n": 0, "Key": "torqueVariation", "Value": str(torque_variation)}, + {"m": 1, "n": 0, "Key": "torqueVariation", "Value": str(mapped_torque_variation)}, {"m": 1, "n": 0, "Key": "temperature", "Value": f"{temperature:.2f}"} ] } @@ -362,10 +607,10 @@ class BioyondReactionStation(BioyondWorkstation): x_value: 手工输入的x值,格式如 "1-2-3" feeding_order_data: feeding_order JSON字符串或对象,用于获取m二酐值 extracted_actuals: 从报告提取的实际加料量JSON字符串,包含actualTargetWeigh和actualVolume - titration_type: 是否滴定(1=否, 2=是),默认2 + titration_type: 是否滴定(NO=1, YES=2),默认2 time: 观察时间(分钟) - torque_variation: 是否观察(int类型, 1=否, 2=是) - temperature: 温度(°C) + torque_variation: 是否观察(NO=1, YES=2) + temperature: 温度(C) 自动公式模板: 1000*(m二酐-x)*V二酐滴定/m二酐滴定 其中: @@ -374,6 +619,13 @@ class BioyondReactionStation(BioyondWorkstation): - x = x_value (手工输入) - m二酐 = feeding_order中type为"main_anhydride"的amount值 """ + # 参数映射 + titration_map = {"NO": "1", "YES": "2", "1": "1", "2": "2"} + torque_map = {"NO": "1", "YES": "2", 1: "1", 2: "2", "1": "1", "2": "2"} + + mapped_titration_type = titration_map.get(str(titration_type), "2") + mapped_torque_variation = int(torque_map.get(str(torque_variation), "1")) + self.append_to_workflow_sequence('{"web_workflow_name": "Liquid_feeding(titration)"}') material_id = self.hardware_interface._get_material_id_by_name(assign_material_name) if material_id is None: @@ -460,22 +712,22 @@ class BioyondReactionStation(BioyondWorkstation): elif not volume_formula: raise ValueError("必须提供 volume_formula 或 (x_value + feeding_order_data + extracted_actuals)") - liquid_step_id = WORKFLOW_STEP_IDS["liquid_feeding_titration"]["liquid"] - observe_step_id = WORKFLOW_STEP_IDS["liquid_feeding_titration"]["observe"] + liquid_step_id = self.workflow_step_ids["liquid_feeding_titration"]["liquid"] + observe_step_id = self.workflow_step_ids["liquid_feeding_titration"]["observe"] params = { "param_values": { liquid_step_id: { - ACTION_NAMES["liquid_feeding_titration"]["liquid"]: [ + self.action_names["liquid_feeding_titration"]["liquid"]: [ {"m": 0, "n": 3, "Key": "volumeFormula", "Value": volume_formula}, - {"m": 0, "n": 3, "Key": "titrationType", "Value": titration_type}, + {"m": 0, "n": 3, "Key": "titrationType", "Value": mapped_titration_type}, {"m": 0, "n": 3, "Key": "assignMaterialName", "Value": material_id} ] }, observe_step_id: { - ACTION_NAMES["liquid_feeding_titration"]["observe"]: [ + self.action_names["liquid_feeding_titration"]["observe"]: [ {"m": 1, "n": 0, "Key": "time", "Value": time}, - {"m": 1, "n": 0, 
"Key": "torqueVariation", "Value": str(torque_variation)}, + {"m": 1, "n": 0, "Key": "torqueVariation", "Value": str(mapped_torque_variation)}, {"m": 1, "n": 0, "Key": "temperature", "Value": f"{temperature:.2f}"} ] } @@ -518,6 +770,89 @@ class BioyondReactionStation(BioyondWorkstation): 'actualVolume': actual_volume } + def _simplify_report(self, report) -> Dict[str, Any]: + """简化实验报告,只保留关键信息,去除冗余的工作流参数""" + if not isinstance(report, dict): + return report + + data = report.get('data', {}) + if not isinstance(data, dict): + return report + + # 提取关键信息 + simplified = { + 'name': data.get('name'), + 'code': data.get('code'), + 'requester': data.get('requester'), + 'workflowName': data.get('workflowName'), + 'workflowStep': data.get('workflowStep'), + 'requestTime': data.get('requestTime'), + 'startPreparationTime': data.get('startPreparationTime'), + 'completeTime': data.get('completeTime'), + 'useTime': data.get('useTime'), + 'status': data.get('status'), + 'statusName': data.get('statusName'), + } + + # 提取物料信息(简化版) + pre_intakes = data.get('preIntakes', []) + if pre_intakes and isinstance(pre_intakes, list): + first_intake = pre_intakes[0] + sample_materials = first_intake.get('sampleMaterials', []) + + # 简化物料信息 + simplified_materials = [] + for material in sample_materials: + if isinstance(material, dict): + mat_info = { + 'materialName': material.get('materialName'), + 'materialTypeName': material.get('materialTypeName'), + 'materialCode': material.get('materialCode'), + 'materialLocation': material.get('materialLocation'), + } + + # 解析parameters中的关键信息 + params_str = material.get('parameters', '{}') + try: + params = json.loads(params_str) if isinstance(params_str, str) else params_str + if isinstance(params, dict): + # 只保留关键参数 + if 'density' in params: + mat_info['density'] = params['density'] + if 'feedingHistory' in params: + mat_info['feedingHistory'] = params['feedingHistory'] + if 'liquidVolume' in params: + mat_info['liquidVolume'] = params['liquidVolume'] + if 'm_diamine_tot' in params: + mat_info['m_diamine_tot'] = params['m_diamine_tot'] + if 'wt_diamine' in params: + mat_info['wt_diamine'] = params['wt_diamine'] + except: + pass + + simplified_materials.append(mat_info) + + simplified['sampleMaterials'] = simplified_materials + + # 提取extraProperties中的实际值 + extra_props = first_intake.get('extraProperties', {}) + if isinstance(extra_props, dict): + simplified_extra = {} + for key, value in extra_props.items(): + try: + parsed_value = json.loads(value) if isinstance(value, str) else value + simplified_extra[key] = parsed_value + except: + simplified_extra[key] = value + simplified['extraProperties'] = simplified_extra + + return { + 'data': simplified, + 'code': report.get('code'), + 'message': report.get('message'), + 'timestamp': report.get('timestamp') + } + def extract_actuals_from_batch_reports(self, batch_reports_result: str) -> dict: print(f"[DEBUG] extract_actuals 收到原始数据: {batch_reports_result[:500]}...") # 打印前500字符 try: @@ -671,7 +1006,12 @@ class BioyondReactionStation(BioyondWorkstation): timeout = int(timeout) if timeout else 7200 check_interval = int(check_interval) if check_interval else 10 if not batch_create_result or batch_create_result == "": - raise ValueError("batch_create_result为空") + raise ValueError( + "batch_create_result参数为空,请确保:\n" + "1. batch_create节点与wait节点之间正确连接了handle\n" + "2. batch_create节点成功执行并返回了结果\n" + "3. 
检查上游batch_create任务是否成功创建了订单" + ) try: if isinstance(batch_create_result, str) and '[...]' in batch_create_result: batch_create_result = batch_create_result.replace('[...]', '[]') @@ -687,7 +1027,14 @@ class BioyondReactionStation(BioyondWorkstation): except Exception as e: raise ValueError(f"解析batch_create_result失败: {e}") if not order_codes or not order_ids: - raise ValueError("缺少order_codes或order_ids") + raise ValueError( + "batch_create_result中未找到order_codes或order_ids,或者为空。\n" + "可能的原因:\n" + "1. batch_create任务执行失败(检查任务是否报错)\n" + "2. 物料配置问题(如'物料样品板分配失败')\n" + "3. Bioyond系统状态异常\n" + f"batch_create_result内容: {batch_create_result[:200]}..." + ) if not isinstance(order_codes, list): order_codes = [order_codes] if not isinstance(order_ids, list): @@ -696,6 +1043,17 @@ class BioyondReactionStation(BioyondWorkstation): raise ValueError("order_codes与order_ids数量不匹配") total = len(order_codes) pending = {c: {"order_id": order_ids[i], "completed": False} for i, c in enumerate(order_codes)} + + # 发布初始状态事件 + for i, oc in enumerate(order_codes): + self._publish_task_status( + task_id=order_ids[i], + task_code=oc, + task_type="bioyond_workflow", + status="running", + progress=0.0 + ) + reports = [] start_time = time.time() while pending: @@ -711,6 +1069,14 @@ class BioyondReactionStation(BioyondWorkstation): "extracted": None, "elapsed_time": elapsed_time }) + # 发布超时事件 + self._publish_task_status( + task_id=pending[oc]["order_id"], + task_code=oc, + task_type="bioyond_workflow", + status="timeout", + result={"elapsed_time": elapsed_time} + ) break completed_round = [] for oc in list(pending.keys()): @@ -721,6 +1087,9 @@ class BioyondReactionStation(BioyondWorkstation): rep = self.hardware_interface.order_report(oid) if not rep: rep = {"error": "无法获取报告"} + else: + # 简化报告,去除冗余信息 + rep = self._simplify_report(rep) reports.append({ "order_code": oc, "order_id": oid, @@ -730,6 +1099,15 @@ class BioyondReactionStation(BioyondWorkstation): "extracted": self._extract_actuals_from_report(rep), "elapsed_time": elapsed_time }) + # 发布完成事件 + self._publish_task_status( + task_id=oid, + task_code=oc, + task_type="bioyond_workflow", + status="completed", + progress=1.0, + result=rep + ) completed_round.append(oc) del self.order_completion_status[oc] except Exception as e: @@ -743,6 +1121,14 @@ class BioyondReactionStation(BioyondWorkstation): "error": str(e), "elapsed_time": elapsed_time }) + # 发布错误事件 + self._publish_task_status( + task_id=oid, + task_code=oc, + task_type="bioyond_workflow", + status="error", + result={"error": str(e)} + ) completed_round.append(oc) for oc in completed_round: del pending[oc] @@ -782,9 +1168,16 @@ class BioyondReactionStation(BioyondWorkstation): assign_material_name: 物料名称(试剂瓶位) time: 观察时间(分钟) torque_variation: 是否观察(int类型, 1=否, 2=是) - titration_type: 是否滴定(1=否, 2=是) - temperature: 温度设定(°C) + titration_type: 是否滴定(NO=1, YES=2) + temperature: 温度设定(C) """ + # 参数映射 + titration_map = {"NO": "1", "YES": "2", "1": "1", "2": "2"} + torque_map = {"NO": "1", "YES": "2", 1: "1", 2: "2", "1": "1", "2": "2"} + + mapped_titration_type = titration_map.get(str(titration_type), "1") + mapped_torque_variation = int(torque_map.get(str(torque_variation), "1")) + self.append_to_workflow_sequence('{"web_workflow_name": "liquid_feeding_beaker"}') material_id = self.hardware_interface._get_material_id_by_name(assign_material_name) if material_id is None: @@ -793,22 +1186,22 @@ class BioyondReactionStation(BioyondWorkstation): if isinstance(temperature, str): temperature = float(temperature) - liquid_step_id = 
WORKFLOW_STEP_IDS["liquid_feeding_beaker"]["liquid"] - observe_step_id = WORKFLOW_STEP_IDS["liquid_feeding_beaker"]["observe"] + liquid_step_id = self.workflow_step_ids["liquid_feeding_beaker"]["liquid"] + observe_step_id = self.workflow_step_ids["liquid_feeding_beaker"]["observe"] params = { "param_values": { liquid_step_id: { - ACTION_NAMES["liquid_feeding_beaker"]["liquid"]: [ + self.action_names["liquid_feeding_beaker"]["liquid"]: [ {"m": 0, "n": 2, "Key": "volume", "Value": volume}, {"m": 0, "n": 2, "Key": "assignMaterialName", "Value": material_id}, - {"m": 0, "n": 2, "Key": "titrationType", "Value": titration_type} + {"m": 0, "n": 2, "Key": "titrationType", "Value": mapped_titration_type} ] }, observe_step_id: { - ACTION_NAMES["liquid_feeding_beaker"]["observe"]: [ + self.action_names["liquid_feeding_beaker"]["observe"]: [ {"m": 1, "n": 0, "Key": "time", "Value": time}, - {"m": 1, "n": 0, "Key": "torqueVariation", "Value": str(torque_variation)}, + {"m": 1, "n": 0, "Key": "torqueVariation", "Value": str(mapped_torque_variation)}, {"m": 1, "n": 0, "Key": "temperature", "Value": f"{temperature:.2f}"} ] } @@ -834,11 +1227,18 @@ class BioyondReactionStation(BioyondWorkstation): Args: assign_material_name: 物料名称(液体种类) volume: 分液量(μL) - titration_type: 是否滴定(1=否, 2=是) + titration_type: 是否滴定(NO=1, YES=2) time: 观察时间(分钟) - torque_variation: 是否观察(int类型, 1=否, 2=是) - temperature: 温度(°C) + torque_variation: 是否观察(NO=1, YES=2) + temperature: 温度(C) """ + # 参数映射 + titration_map = {"NO": "1", "YES": "2", "1": "1", "2": "2"} + torque_map = {"NO": "1", "YES": "2", 1: "1", 2: "2", "1": "1", "2": "2"} + + mapped_titration_type = titration_map.get(str(titration_type), "1") + mapped_torque_variation = int(torque_map.get(str(torque_variation), "1")) + self.append_to_workflow_sequence('{"web_workflow_name": "drip_back"}') material_id = self.hardware_interface._get_material_id_by_name(assign_material_name) if material_id is None: @@ -847,22 +1247,22 @@ class BioyondReactionStation(BioyondWorkstation): if isinstance(temperature, str): temperature = float(temperature) - liquid_step_id = WORKFLOW_STEP_IDS["drip_back"]["liquid"] - observe_step_id = WORKFLOW_STEP_IDS["drip_back"]["observe"] + liquid_step_id = self.workflow_step_ids["drip_back"]["liquid"] + observe_step_id = self.workflow_step_ids["drip_back"]["observe"] params = { "param_values": { liquid_step_id: { - ACTION_NAMES["drip_back"]["liquid"]: [ - {"m": 0, "n": 1, "Key": "titrationType", "Value": titration_type}, + self.action_names["drip_back"]["liquid"]: [ + {"m": 0, "n": 1, "Key": "titrationType", "Value": mapped_titration_type}, {"m": 0, "n": 1, "Key": "assignMaterialName", "Value": material_id}, {"m": 0, "n": 1, "Key": "volume", "Value": volume} ] }, observe_step_id: { - ACTION_NAMES["drip_back"]["observe"]: [ + self.action_names["drip_back"]["observe"]: [ {"m": 1, "n": 0, "Key": "time", "Value": time}, - {"m": 1, "n": 0, "Key": "torqueVariation", "Value": str(torque_variation)}, + {"m": 1, "n": 0, "Key": "torqueVariation", "Value": str(mapped_torque_variation)}, {"m": 1, "n": 0, "Key": "temperature", "Value": f"{temperature:.2f}"} ] } @@ -874,6 +1274,53 @@ class BioyondReactionStation(BioyondWorkstation): print(f"当前队列长度: {len(self.pending_task_params)}") return json.dumps({"suc": True}) + def add_time_constraint( + self, + duration: int, + start_step_key: str = "", + end_step_key: str = "", + start_point: int = 0, + end_point: int = 0 + ): + """添加时间约束 + + Args: + duration: 时间(秒) + start_step_key: 起点步骤Key (可选, 默认为空则自动选择) + end_step_key: 终点步骤Key (可选, 
默认为空则自动选择) + start_point: 起点计时点 (Start=0, End=1) + end_point: 终点计时点 (Start=0, End=1) + """ + # 参数映射 + point_map = {"Start": 0, "End": 1, 0: 0, 1: 1, "0": 0, "1": 1} + + mapped_start_point = point_map.get(start_point, 0) + mapped_end_point = point_map.get(end_point, 0) + + # 注意:此方法应在添加完起点工作流后,添加终点工作流前调用 + + + current_count = len(self._cached_workflow_sequence) + if current_count == 0: + print("⚠️ 无法添加时间约束:当前没有工作流") + return + + start_index = current_count - 1 + end_index = current_count # 指向下一个即将添加的工作流 + + constraint = { + "start_index": start_index, + "start_step_key": start_step_key, + "end_index": end_index, + "end_step_key": end_step_key, + "duration": duration, + "start_point": mapped_start_point, + "end_point": mapped_end_point + } + self.pending_time_constraints.append(constraint) + print(f"已添加时间约束: Workflow[{start_index}].{start_step_key} -> Workflow[{end_index}].{end_step_key} ({duration}s)") + return json.dumps({"suc": True}) + # ==================== 工作流管理方法 ==================== def get_workflow_sequence(self) -> List[str]: @@ -884,12 +1331,115 @@ class BioyondReactionStation(BioyondWorkstation): """ id_to_name = {workflow_id: name for name, workflow_id in self.workflow_mappings.items()} workflow_names = [] - for workflow_id in self.workflow_sequence: + # 使用内部缓存的列表,而不是属性(属性返回 JSON 字符串) + for workflow_id in self._cached_workflow_sequence: workflow_name = id_to_name.get(workflow_id, workflow_id) workflow_names.append(workflow_name) - print(f"工作流序列: {workflow_names}") return workflow_names + def sync_workflow_sequence_from_bioyond(self) -> dict: + """从 Bioyond 系统同步工作流序列 + + 查询 Bioyond 系统中的工作流列表,并更新本地 workflow_sequence + + Returns: + dict: 包含同步结果的字典 + - success: bool, 是否成功 + - workflows: list, 工作流列表 + - message: str, 结果消息 + """ + try: + print(f"[同步工作流序列] 开始从 Bioyond 系统查询工作流...") + + # 检查 hardware_interface 是否可用 + if not hasattr(self, 'hardware_interface') or self.hardware_interface is None: + error_msg = "hardware_interface 未初始化" + print(f"❌ [同步工作流序列] {error_msg}") + return { + "success": False, + "workflows": [], + "message": error_msg + } + + # 查询所有工作流 + query_params = json.dumps({}) + print(f"[同步工作流序列] 调用 hardware_interface.query_workflow...") + workflows_data = self.hardware_interface.query_workflow(query_params) + + print(f"[同步工作流序列] 查询返回数据: {workflows_data}") + + if not workflows_data: + error_msg = "未能从 Bioyond 系统获取工作流数据(返回为空)" + print(f"⚠️ [同步工作流序列] {error_msg}") + return { + "success": False, + "workflows": [], + "message": error_msg + } + + # 获取工作流列表 - Bioyond API 返回的字段是 items,不是 list + workflow_list = workflows_data.get("items", workflows_data.get("list", [])) + print(f"[同步工作流序列] 从 Bioyond 查询到 {len(workflow_list)} 个工作流") + + if len(workflow_list) == 0: + warning_msg = "Bioyond 系统中暂无工作流" + print(f"⚠️ [同步工作流序列] {warning_msg}") + # 清空缓存 + self._cached_workflow_sequence = [] + return { + "success": True, + "workflows": [], + "message": warning_msg + } + + # 清空当前序列 + workflow_ids = [] + + # 构建结果 + synced_workflows = [] + for workflow in workflow_list: + workflow_id = workflow.get("id") + workflow_name = workflow.get("name") + workflow_status = workflow.get("status") # 工作流状态 + + print(f" - 工作流: {workflow_name} (ID: {workflow_id[:8] if workflow_id else 'N/A'}..., 状态: {workflow_status})") + + synced_workflows.append({ + "id": workflow_id, + "name": workflow_name, + "status": workflow_status, + "createTime": workflow.get("createTime"), + "updateTime": workflow.get("updateTime") + }) + + # 添加所有工作流 ID 到执行序列 + if workflow_id: + workflow_ids.append(workflow_id) + + # 更新缓存 + 
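# Call-order sketch for add_time_constraint (illustrative values, keyword names taken
# from the docstrings above; `station` is a hypothetical, already-initialized instance):
# the constraint binds the workflow queued before the call to the one queued after it,
# because start_index/end_index are derived from the current cached sequence length.
def queue_two_steps_with_gap(station, gap_seconds: int = 600):
    # 1) queue the start workflow
    station.solid_feeding_vials(material_id="BTDA", time=38, torque_variation="YES",
                                assign_material_name="BTDA_bottle_1", temperature=25.0)
    # 2) record the constraint; the last queued workflow becomes the start point
    station.add_time_constraint(duration=gap_seconds, start_point="End", end_point="Start")
    # 3) queue the end workflow; it becomes the constraint's end point
    station.drip_back(assign_material_name="NMP_bottle_1", volume=5000,
                      titration_type="NO", time=10, torque_variation="NO", temperature=25.0)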
self._cached_workflow_sequence = workflow_ids + + success_msg = f"成功同步 {len(synced_workflows)} 个工作流到本地序列" + print(f"✅ [同步工作流序列] {success_msg}") + print(f"[同步工作流序列] 当前 workflow_sequence: {self._cached_workflow_sequence}") + + return { + "success": True, + "workflows": synced_workflows, + "message": success_msg + } + + except Exception as e: + error_msg = f"从 Bioyond 同步工作流序列失败: {e}" + print(f"❌ [同步工作流序列] {error_msg}") + import traceback + traceback.print_exc() + return { + "success": False, + "workflows": [], + "message": error_msg + } + def workflow_step_query(self, workflow_id: str) -> dict: """查询工作流步骤参数 @@ -912,9 +1462,69 @@ class BioyondReactionStation(BioyondWorkstation): """ return self.hardware_interface.create_order(json_str) + def clear_workflows(self): + """清空缓存的工作流序列和参数""" + self._cached_workflow_sequence = [] + self.pending_time_constraints = [] + print("已清空工作流序列缓存和时间约束队列") + + def clean_all_server_workflows(self) -> Dict[str, Any]: + """ + 清空服务端所有非核心工作流 + 逻辑: + 1. 利用 3.2 接口查询所有工作流 (includeDetail=False) + 2. 提取所有 ID + 3. 利用 3.38 接口 (hard_delete_merged_workflows) 批量删除 + """ + print("正在查询服务端工作流列表...") + try: + # 查询工作流列表 + # 仅需要ID,所以设置 includeDetail=False + query_params = {"includeDetail": False, "type": 0} + query_result = self._post_project_api("/api/lims/workflow/work-flow-list", query_params) + + if query_result.get("code") != 1: + return query_result + + data_obj = query_result.get("data") + + # 处理返回值可能是列表或者分页对象的不同情况 + if isinstance(data_obj, list): + workflows = data_obj + elif isinstance(data_obj, dict): + # 尝试从常见分页字段获取列表 + workflows = data_obj.get("items", data_obj.get("list", [])) + else: + workflows = [] + + if not workflows: + print("无需删除: 服务端无工作流") + return {"code": 1, "message": "服务端无工作流", "timestamp": int(time.time())} + + ids_to_delete = [] + for wf in workflows: + if isinstance(wf, dict): + wf_id = wf.get("id") + if wf_id: + ids_to_delete.append(str(wf_id)) + + if not ids_to_delete: + print("无需删除: 无有效工作流ID") + return {"code": 1, "message": "无有效工作流ID", "timestamp": int(time.time())} + + print(f"查询到 {len(ids_to_delete)} 个工作流,准备调用硬删除接口...") + # 硬删除 + return self.hard_delete_merged_workflows(ids_to_delete) + + except Exception as e: + print(f"❌ 清空工作流业务异常: {str(e)}") + return {"code": 0, "message": str(e), "timestamp": int(time.time())} + def hard_delete_merged_workflows(self, workflow_ids: List[str]) -> Dict[str, Any]: """ - 调用新接口:硬删除合并后的工作流 + 调用新接口:硬删除合并后的工作流 + 根据用户反馈,/api/lims/order/workflows 接口存在校验问题 + 改用 /api/data/order/workflows?workFlowGuids=... 
接口 Args: workflow_ids: 要删除的工作流ID数组 @@ -925,7 +1535,30 @@ class BioyondReactionStation(BioyondWorkstation): try: if not isinstance(workflow_ids, list): raise ValueError("workflow_ids必须是字符串数组") - return self._delete_project_api("/api/lims/order/workflows", workflow_ids) + + # 使用新 Endpoint: /api/data/order/workflows + endpoint = "/api/data/order/workflows" + url = f"{self.hardware_interface.host}{endpoint}" + + print(f"\n📤 硬删除请求 (Query Param): {url}") + print(f"IDs count: {len(workflow_ids)}") + + # 使用 requests 的 params 传递数组,会生成 workFlowGuids=id1&workFlowGuids=id2 的形式 + params = {"workFlowGuids": workflow_ids} + + response = requests.delete( + url, + params=params, + timeout=60 + ) + + if response.status_code == 200: + print("✅ 删除请求成功") + return {"code": 1, "message": "删除成功", "timestamp": int(time.time())} + else: + print(f"❌ 删除失败: status={response.status_code}, content={response.text}") + return {"code": 0, "message": f"HTTP {response.status_code}: {response.text}", "timestamp": int(time.time())} + except Exception as e: print(f"❌ 硬删除异常: {str(e)}") return {"code": 0, "message": str(e), "timestamp": int(time.time())} @@ -936,14 +1569,14 @@ class BioyondReactionStation(BioyondWorkstation): """项目接口通用POST调用 参数: - endpoint: 接口路径(例如 /api/lims/order/skip-titration-steps) + endpoint: 接口路径(例如 /api/lims/order/skip-titration-steps) data: 请求体中的 data 字段内容 返回: - dict: 服务端响应,失败时返回 {code:0,message,...} + dict: 服务端响应,失败时返回 {code:0,message,...} """ request_data = { - "apiKey": API_CONFIG["api_key"], + "apiKey": self.bioyond_config["api_key"], "requestTime": self.hardware_interface.get_current_time_iso8601(), "data": data } @@ -976,35 +1609,57 @@ class BioyondReactionStation(BioyondWorkstation): """项目接口通用DELETE调用 参数: - endpoint: 接口路径(例如 /api/lims/order/workflows) + endpoint: 接口路径(例如 /api/lims/order/workflows) data: 请求体中的 data 字段内容 返回: - dict: 服务端响应,失败时返回 {code:0,message,...} + dict: 服务端响应,失败时返回 {code:0,message,...} """ request_data = { - "apiKey": API_CONFIG["api_key"], + "apiKey": self.bioyond_config["api_key"], "requestTime": self.hardware_interface.get_current_time_iso8601(), "data": data } print(f"\n📤 项目DELETE请求: {self.hardware_interface.host}{endpoint}") print(json.dumps(request_data, indent=4, ensure_ascii=False)) try: - response = requests.delete( + # 使用 requests.request 显式发送 Body,避免 requests.delete 可能的兼容性问题 + response = requests.request( + "DELETE", f"{self.hardware_interface.host}{endpoint}", - json=request_data, + data=json.dumps(request_data), headers={"Content-Type": "application/json"}, timeout=30 ) - result = response.json() + + try: + result = response.json() + except json.JSONDecodeError: + print(f"❌ 非JSON响应: {response.text}") + return {"code": 0, "message": "非JSON响应", "timestamp": int(time.time())} + if result.get("code") == 1: print("✅ 请求成功") else: - print(f"❌ 请求失败: {result.get('message','未知错误')}") + # 尝试提取详细错误信息 (兼容 Abp 等框架的 error 结构) + msg = result.get('message') + if not msg: + error_obj = result.get('error', {}) + if isinstance(error_obj, dict): + msg = error_obj.get('message') + details = error_obj.get('details') + if details: + msg = f"{msg}: {details}" + + if not msg: + msg = f"未知错误 (Status: {response.status_code})" + + print(f"❌ 请求失败: {msg}") + # 打印完整返回以供调试 + print(f"服务端返回: {json.dumps(result, ensure_ascii=False)}") + return result - except json.JSONDecodeError: - print("❌ 非JSON响应") - return {"code": 0, "message": "非JSON响应", "timestamp": int(time.time())} + except requests.exceptions.Timeout: print("❌ 请求超时") return {"code": 0, "message": "请求超时", "timestamp": int(time.time())} @@ 
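# Behavior the hard-delete call above relies on: requests expands a list value in
# `params` into repeated query keys, i.e. ?workFlowGuids=id1&workFlowGuids=id2.
# Standalone check (the httpbin URL is only an example endpoint):
import requests

r = requests.delete("https://httpbin.org/delete",
                    params={"workFlowGuids": ["id-1", "id-2"]}, timeout=10)
print(r.url)   # ...delete?workFlowGuids=id-1&workFlowGuids=id-2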
-1030,16 +1685,16 @@ class BioyondReactionStation(BioyondWorkstation): for name in web_workflow_list: workflow_id = self.workflow_mappings.get(name, "") if not workflow_id: - print(f"警告:未找到工作流名称 {name} 对应的 ID") + print(f"警告:未找到工作流名称 {name} 对应的 ID") continue workflows_result.append({"id": workflow_id, "name": name}) print(f"process_web_workflows 输出: {workflows_result}") return workflows_result except json.JSONDecodeError as e: - print(f"错误:无法解析 web_workflow_json: {e}") + print(f"错误:无法解析 web_workflow_json: {e}") return [] except Exception as e: - print(f"错误:处理工作流失败: {e}") + print(f"错误:处理工作流失败: {e}") return [] def _build_workflows_with_parameters(self, workflows_result: list) -> list: @@ -1047,7 +1702,7 @@ class BioyondReactionStation(BioyondWorkstation): 构建带参数的工作流列表 Args: - workflows_result: 处理后的工作流列表(应为包含 id 和 name 的字典列表) + workflows_result: 处理后的工作流列表(应为包含 id 和 name 的字典列表) Returns: 符合新接口格式的工作流参数结构 @@ -1059,24 +1714,24 @@ class BioyondReactionStation(BioyondWorkstation): for idx, workflow_info in enumerate(workflows_result): if not isinstance(workflow_info, dict): - print(f"错误:workflows_result[{idx}] 不是字典,而是 {type(workflow_info)}: {workflow_info}") + print(f"错误:workflows_result[{idx}] 不是字典,而是 {type(workflow_info)}: {workflow_info}") continue workflow_id = workflow_info.get("id") if not workflow_id: - print(f"警告:workflows_result[{idx}] 缺少 'id' 键") + print(f"警告:workflows_result[{idx}] 缺少 'id' 键") continue workflow_name = workflow_info.get("name", "") # print(f"\n🔧 处理工作流 [{idx}]: {workflow_name} (ID: {workflow_id})") if idx >= len(self.pending_task_params): - # print(f" ⚠️ 无对应参数,跳过") + # print(f" ⚠️ 无对应参数,跳过") workflows_with_params.append({"id": workflow_id}) continue param_data = self.pending_task_params[idx] param_values = param_data.get("param_values", {}) if not param_values: - # print(f" ⚠️ 参数为空,跳过") + # print(f" ⚠️ 参数为空,跳过") workflows_with_params.append({"id": workflow_id}) continue @@ -1135,10 +1790,10 @@ class BioyondReactionStation(BioyondWorkstation): def merge_workflow_with_parameters(self, json_str: str) -> dict: """ - 调用新接口:合并工作流并传递参数 + 调用新接口:合并工作流并传递参数 Args: - json_str: JSON格式的字符串,包含: + json_str: JSON格式的字符串,包含: - name: 工作流名称 - workflows: [{"id": "工作流ID", "stepParameters": {...}}] @@ -1148,7 +1803,7 @@ class BioyondReactionStation(BioyondWorkstation): try: data = json.loads(json_str) - # 在工作流名称后面添加时间戳,避免重复 + # 在工作流名称后面添加时间戳,避免重复 if "name" in data and data["name"]: timestamp = self.hardware_interface.get_current_time_iso8601().replace(":", "-").replace(".", "-") original_name = data["name"] @@ -1156,7 +1811,7 @@ class BioyondReactionStation(BioyondWorkstation): print(f"🕒 工作流名称已添加时间戳: {original_name} -> {data['name']}") request_data = { - "apiKey": API_CONFIG["api_key"], + "apiKey": self.bioyond_config["api_key"], "requestTime": self.hardware_interface.get_current_time_iso8601(), "data": data } @@ -1195,7 +1850,7 @@ class BioyondReactionStation(BioyondWorkstation): return None if result.get("code") == 1: - print(f"✅ 工作流合并成功(带参数)") + print(f"✅ 工作流合并成功(带参数)") return result.get("data", {}) else: error_msg = result.get('message', '未知错误') @@ -1216,7 +1871,7 @@ class BioyondReactionStation(BioyondWorkstation): return None def _validate_and_refresh_workflow_if_needed(self, workflow_name: str) -> bool: - """验证工作流ID是否有效,如果无效则重新合并 + """验证工作流ID是否有效,如果无效则重新合并 Args: workflow_name: 工作流名称 @@ -1225,17 +1880,17 @@ class BioyondReactionStation(BioyondWorkstation): bool: 验证或刷新是否成功 """ print(f"\n🔍 验证工作流ID有效性...") - if not self.workflow_sequence: - print(f" ⚠️ 工作流序列为空,需要重新合并") + if not 
self._cached_workflow_sequence: + print(f" ⚠️ 工作流序列为空,需要重新合并") return False - first_workflow_id = self.workflow_sequence[0] + first_workflow_id = self._cached_workflow_sequence[0] try: structure = self.workflow_step_query(first_workflow_id) if structure: print(f" ✅ 工作流ID有效") return True else: - print(f" ⚠️ 工作流ID已过期,需要重新合并") + print(f" ⚠️ 工作流ID已过期,需要重新合并") return False except Exception as e: print(f" ❌ 工作流ID验证失败: {e}") @@ -1244,7 +1899,7 @@ class BioyondReactionStation(BioyondWorkstation): def process_and_execute_workflow(self, workflow_name: str, task_name: str) -> dict: """ - 一站式处理工作流程:解析网页工作流列表,合并工作流(带参数),然后发布任务 + 一站式处理工作流程:解析网页工作流列表,合并工作流(带参数),然后发布任务 Args: workflow_name: 合并后的工作流名称 @@ -1269,12 +1924,111 @@ class BioyondReactionStation(BioyondWorkstation): workflows_with_params = self._build_workflows_with_parameters(workflows_result) + # === 构建时间约束 (tcmBs) === + tcm_bs_list = [] + if self.pending_time_constraints: + print(f"\n🔗 处理时间约束 ({len(self.pending_time_constraints)} 个)...") + + + # 建立索引到名称的映射 + workflow_names_by_index = [w["name"] for w in workflows_result] + + # 默认步骤映射表 + DEFAULT_STEP_KEYS = { + "Solid_feeding_vials": "feeding", + "liquid_feeding_beaker": "liquid", + "Liquid_feeding_vials(non-titration)": "liquid", + "Liquid_feeding_solvents": "liquid", + "Liquid_feeding(titration)": "liquid", + "Drip_back": "liquid", + "reactor_taken_in": "config" + } + + for c in self.pending_time_constraints: + try: + start_idx = c["start_index"] + end_idx = c["end_index"] + + if start_idx >= len(workflow_names_by_index) or end_idx >= len(workflow_names_by_index): + print(f" ❌ 约束索引越界: {start_idx} -> {end_idx} (总数: {len(workflow_names_by_index)})") + continue + + start_wf_name = workflow_names_by_index[start_idx] + end_wf_name = workflow_names_by_index[end_idx] + + # 辅助函数:根据名称查找 config 中的 key + def find_config_key(name): + # 1. 直接匹配 + if name in self.workflow_step_ids: + return name + # 2. 尝试反向查找 WORKFLOW_TO_SECTION_MAP (如果需要) + # 3. 
尝试查找 WORKFLOW_MAPPINGS 的 key (忽略大小写匹配或特定映射) + + # 硬编码常见映射 (Web名称 -> Config Key) + mapping = { + "Solid_feeding_vials": "solid_feeding_vials", + "Liquid_feeding_vials(non-titration)": "liquid_feeding_vials_non_titration", + "Liquid_feeding_solvents": "liquid_feeding_solvents", + "Liquid_feeding(titration)": "liquid_feeding_titration", + "Drip_back": "drip_back" + } + return mapping.get(name, name) + + start_config_key = find_config_key(start_wf_name) + end_config_key = find_config_key(end_wf_name) + + # 查找 UUID + if start_config_key not in self.workflow_step_ids: + print(f" ❌ 找不到工作流 {start_wf_name} (Key: {start_config_key}) 的步骤配置") + continue + if end_config_key not in self.workflow_step_ids: + print(f" ❌ 找不到工作流 {end_wf_name} (Key: {end_config_key}) 的步骤配置") + continue + + # 确定步骤 Key + start_key = c["start_step_key"] + if not start_key: + start_key = DEFAULT_STEP_KEYS.get(start_wf_name) + if not start_key: + print(f" ❌ 未指定起点步骤Key且无默认值: {start_wf_name}") + continue + + end_key = c["end_step_key"] + if not end_key: + end_key = DEFAULT_STEP_KEYS.get(end_wf_name) + if not end_key: + print(f" ❌ 未指定终点步骤Key且无默认值: {end_wf_name}") + continue + + start_step_id = self.workflow_step_ids[start_config_key].get(start_key) + end_step_id = self.workflow_step_ids[end_config_key].get(end_key) + + if not start_step_id or not end_step_id: + print(f" ❌ 无法解析步骤ID: {start_config_key}.{start_key} -> {end_config_key}.{end_key}") + continue + + tcm_bs_list.append({ + "startWorkflowIndex": start_idx, + "startStepId": start_step_id, + "startComparePoint": c["start_point"], + "endWorkflowIndex": end_idx, + "endStepId": end_step_id, + "endComparePoint": c["end_point"], + "ct": c["duration"], + "description": f"Constraint {start_idx}->{end_idx}" + }) + print(f" ✅ 添加约束: {start_wf_name}({start_key}) -> {end_wf_name}({end_key})") + + except Exception as e: + print(f" ❌ 处理约束时出错: {e}") + merge_data = { "name": workflow_name, - "workflows": workflows_with_params + "workflows": workflows_with_params, + "tcmBs": tcm_bs_list } - # print(f"\n🔄 合并工作流(带参数),名称: {workflow_name}") + # print(f"\n🔄 合并工作流(带参数),名称: {workflow_name}") merged_workflow = self.merge_workflow_with_parameters(json.dumps(merge_data)) if not merged_workflow: @@ -1291,20 +2045,28 @@ class BioyondReactionStation(BioyondWorkstation): "paramValues": {} }] - result = self.create_order(json.dumps(order_params)) - - if not result: - return self._create_error_result("创建任务失败", "create_order") - - # 清空工作流序列和参数,防止下次执行时累积重复 - self.pending_task_params = [] - self.clear_workflows() # 清空工作流序列,避免重复累积 + # 尝试创建订单:无论成功或失败,都需要在本次尝试结束后清理本地队列,避免下一次重复累积 + try: + result = self.create_order(json.dumps(order_params)) + if not result: + # 返回错误结果之前先记录情况(稍后由 finally 清理队列) + print("⚠️ 创建任务返回空或失败响应,稍后将清理本地队列以避免重复累积") + return self._create_error_result("创建任务失败", "create_order") + finally: + # 无论任务创建成功与否,都要清空本地保存的参数和工作流序列,防止下次重复 + try: + self.pending_task_params = [] + self.clear_workflows() # 清空工作流序列,避免重复累积 + print("✅ 已清理 pending_task_params 与 workflow_sequence") + except Exception as _ex: + # 记录清理失败,但不要阻塞原始返回 + print(f"❌ 清理队列时发生异常: {_ex}") # print(f"\n✅ 任务创建成功: {result}") # print(f"\n✅ 任务创建成功") print(f"{'='*60}\n") - # 返回结果,包含合并后的工作流数据和订单参数 + # 返回结果,包含合并后的工作流数据和订单参数 return json.dumps({ "success": True, "result": result, @@ -1321,10 +2083,42 @@ class BioyondReactionStation(BioyondWorkstation): preintake_id: 通量ID Returns: - Dict[str, Any]: 服务器响应,包含状态码、消息和时间戳 + Dict[str, Any]: 服务器响应,包含状态码,消息和时间戳 """ try: return self._post_project_api("/api/lims/order/skip-titration-steps", preintake_id) except 
Exception as e: print(f"❌ 跳过滴定异常: {str(e)}") return {"code": 0, "message": str(e), "timestamp": int(time.time())} + + def set_reactor_temperature(self, reactor_id: int, temperature: float) -> str: + """ + 设置反应器温度 + + Args: + reactor_id: 反应器编号 (1-5) + temperature: 目标温度 (°C) + + Returns: + str: JSON 字符串,格式为 {"suc": True/False, "msg": "描述信息"} + """ + if reactor_id not in range(1, 6): + return json.dumps({"suc": False, "msg": "反应器编号必须在 1-5 之间"}) + + try: + payload = { + "deviceTypeName": f"反应模块{chr(64 + reactor_id)}", # 1->A, 2->B... + "temperature": float(temperature) + } + resp = requests.post( + f"{self.hardware_interface.host}/api/lims/device/set-reactor-temperatue", + json=payload, + headers={"Content-Type": "application/json"}, + timeout=10 + ) + if resp.status_code == 200: + return json.dumps({"suc": True, "msg": "温度设置成功"}) + else: + return json.dumps({"suc": False, "msg": f"温度设置失败,HTTP {resp.status_code}"}) + except Exception as e: + return json.dumps({"suc": False, "msg": f"温度设置异常: {str(e)}"}) diff --git a/unilabos/devices/workstation/bioyond_studio/station.py b/unilabos/devices/workstation/bioyond_studio/station.py index 22b846d..327d819 100644 --- a/unilabos/devices/workstation/bioyond_studio/station.py +++ b/unilabos/devices/workstation/bioyond_studio/station.py @@ -6,6 +6,7 @@ Bioyond Workstation Implementation """ import time import traceback +import threading from datetime import datetime from typing import Dict, Any, List, Optional, Union import json @@ -27,6 +28,90 @@ from pylabrobot.resources.resource import Resource as ResourcePLR from unilabos.devices.workstation.workstation_http_service import WorkstationHTTPService +class ConnectionMonitor: + """Bioyond连接监控器""" + def __init__(self, workstation, check_interval=30): + self.workstation = workstation + self.check_interval = check_interval + self._running = False + self._thread = None + self._last_status = "unknown" + + def start(self): + if self._running: + return + self._running = True + self._thread = threading.Thread(target=self._monitor_loop, daemon=True, name="BioyondConnectionMonitor") + self._thread.start() + logger.info("Bioyond连接监控器已启动") + + def stop(self): + self._running = False + if self._thread: + self._thread.join(timeout=2) + logger.info("Bioyond连接监控器已停止") + + def _monitor_loop(self): + while self._running: + try: + # 使用 lightweight API 检查连接 + # query_matial_type_list 是比较快的查询 + start_time = time.time() + result = self.workstation.hardware_interface.material_type_list() + + status = "online" if result else "offline" + msg = "Connection established" if status == "online" else "Failed to get material type list" + + if status != self._last_status: + logger.info(f"Bioyond连接状态变更: {self._last_status} -> {status}") + self._publish_event(status, msg) + self._last_status = status + + # 发布心跳 (可选,或者只在状态变更时发布) + # self._publish_event(status, msg) + + except Exception as e: + logger.error(f"Bioyond连接检查异常: {e}") + if self._last_status != "error": + self._publish_event("error", str(e)) + self._last_status = "error" + + time.sleep(self.check_interval) + + def _publish_event(self, status, message): + try: + if hasattr(self.workstation, "_ros_node") and self.workstation._ros_node: + event_data = { + "status": status, + "message": message, + "timestamp": datetime.now().isoformat() + } + + # 动态发布消息,需要在 ROS2DeviceNode 中有对应支持 + # 这里假设通用事件发布机制,使用 String 类型的 topic + # 话题: //events/device_status + ns = self.workstation._ros_node.namespace + topic = f"{ns}/events/device_status" + + # 使用 ROS2DeviceNode 的发布功能 + # 如果没有预定义的 publisher,需要动态创建 
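# The block below lazily creates the publisher on first use, caches it on the
# workstation object as `_device_status_pub`, and reuses it for every later
# status change, so the topic (resolved as "<device namespace>/events/device_status")
# is only set up once per workstation instance.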
+ # 注意:workstation base node 可能没有自动创建 arbitrary publishers 的机制 + # 这里我们先尝试用 String json 发布 + + # 在 ROS2DeviceNode 中通常需要先 create_publisher + # 为了简单起见,我们检查是否已有 publisher,没有则创建 + if not hasattr(self.workstation, "_device_status_pub"): + self.workstation._device_status_pub = self.workstation._ros_node.create_publisher( + String, topic, 10 + ) + + self.workstation._device_status_pub.publish( + convert_to_ros_msg(String, json.dumps(event_data, ensure_ascii=False)) + ) + except Exception as e: + logger.error(f"发布设备状态事件失败: {e}") + + class BioyondResourceSynchronizer(ResourceSynchronizer): """Bioyond资源同步器 @@ -172,9 +257,8 @@ class BioyondResourceSynchronizer(ResourceSynchronizer): else: logger.info(f"[同步→Bioyond] ➕ 物料不存在于 Bioyond,将创建新物料并入库") - # 第1步:获取仓库配置 - from .config import WAREHOUSE_MAPPING - warehouse_mapping = WAREHOUSE_MAPPING + # 第1步:从配置中获取仓库配置 + warehouse_mapping = self.bioyond_config.get("warehouse_mapping", {}) # 确定目标仓库名称 parent_name = None @@ -236,14 +320,20 @@ class BioyondResourceSynchronizer(ResourceSynchronizer): # 第2步:转换为 Bioyond 格式 logger.info(f"[同步→Bioyond] 🔄 转换物料为 Bioyond 格式...") - # 导入物料默认参数配置 - from .config import MATERIAL_DEFAULT_PARAMETERS + # 从配置中获取物料默认参数 + material_default_params = self.workstation.bioyond_config.get("material_default_parameters", {}) + material_type_params = self.workstation.bioyond_config.get("material_type_parameters", {}) + + # 合并参数配置:物料名称参数 + typeId参数(转换为 type: 格式) + merged_params = material_default_params.copy() + for type_id, params in material_type_params.items(): + merged_params[f"type:{type_id}"] = params bioyond_material = resource_plr_to_bioyond( [resource], type_mapping=self.workstation.bioyond_config["material_type_mappings"], warehouse_mapping=self.workstation.bioyond_config["warehouse_mapping"], - material_params=MATERIAL_DEFAULT_PARAMETERS + material_params=merged_params )[0] logger.info(f"[同步→Bioyond] 🔧 准备覆盖locations字段,目标仓库: {parent_name}, 库位: {update_site}, UUID: {target_location_uuid[:8]}...") @@ -466,13 +556,20 @@ class BioyondResourceSynchronizer(ResourceSynchronizer): return material_bioyond_id # 转换为 Bioyond 格式 - from .config import MATERIAL_DEFAULT_PARAMETERS + # 从配置中获取物料默认参数 + material_default_params = self.workstation.bioyond_config.get("material_default_parameters", {}) + material_type_params = self.workstation.bioyond_config.get("material_type_parameters", {}) + + # 合并参数配置:物料名称参数 + typeId参数(转换为 type: 格式) + merged_params = material_default_params.copy() + for type_id, params in material_type_params.items(): + merged_params[f"type:{type_id}"] = params bioyond_material = resource_plr_to_bioyond( [resource], type_mapping=self.workstation.bioyond_config["material_type_mappings"], warehouse_mapping=self.workstation.bioyond_config["warehouse_mapping"], - material_params=MATERIAL_DEFAULT_PARAMETERS + material_params=merged_params )[0] # ⚠️ 关键:创建物料时不设置 locations,让 Bioyond 系统暂不分配库位 @@ -526,8 +623,7 @@ class BioyondResourceSynchronizer(ResourceSynchronizer): logger.info(f"[物料入库] 目标库位: {update_site}") # 获取仓库配置和目标库位 UUID - from .config import WAREHOUSE_MAPPING - warehouse_mapping = WAREHOUSE_MAPPING + warehouse_mapping = self.workstation.bioyond_config.get("warehouse_mapping", {}) parent_name = None target_location_uuid = None @@ -582,6 +678,44 @@ class BioyondWorkstation(WorkstationBase): 集成Bioyond物料管理的工作站实现 """ + def _publish_task_status( + self, + task_id: str, + task_type: str, + status: str, + result: dict = None, + progress: float = 0.0, + task_code: str = None + ): + """发布任务状态事件""" + try: + if not getattr(self, "_ros_node", None): + 
return + + event_data = { + "task_id": task_id, + "task_code": task_code, + "task_type": task_type, + "status": status, + "progress": progress, + "timestamp": datetime.now().isoformat() + } + if result: + event_data["result"] = result + + topic = f"{self._ros_node.namespace}/events/task_status" + + if not hasattr(self, "_task_status_pub"): + self._task_status_pub = self._ros_node.create_publisher( + String, topic, 10 + ) + + self._task_status_pub.publish( + convert_to_ros_msg(String, json.dumps(event_data, ensure_ascii=False)) + ) + except Exception as e: + logger.error(f"发布任务状态事件失败: {e}") + def __init__( self, bioyond_config: Optional[Dict[str, Any]] = None, @@ -603,10 +737,28 @@ class BioyondWorkstation(WorkstationBase): raise ValueError("Deck 配置不能为空,请在配置文件中添加正确的 deck 配置") # 初始化 warehouses 属性 - self.deck.warehouses = {} - for resource in self.deck.children: - if isinstance(resource, WareHouse): - self.deck.warehouses[resource.name] = resource + if not hasattr(self.deck, "warehouses") or self.deck.warehouses is None: + self.deck.warehouses = {} + + # 仅当 warehouses 为空时尝试重新扫描(避免覆盖子类的修复) + if not self.deck.warehouses: + for resource in self.deck.children: + # 兼容性增强: 只要是仓库类别或者是 WareHouse 实例均可 + is_warehouse = isinstance(resource, WareHouse) or getattr(resource, "category", "") == "warehouse" + + # 如果配置中有定义,也可以认定为 warehouse + if not is_warehouse and "warehouse_mapping" in bioyond_config: + if resource.name in bioyond_config["warehouse_mapping"]: + is_warehouse = True + + if is_warehouse: + self.deck.warehouses[resource.name] = resource + # 确保 category 被正确设置,方便后续使用 + if getattr(resource, "category", "") != "warehouse": + try: + resource.category = "warehouse" + except: + pass # 创建通信模块 self._create_communication_module(bioyond_config) @@ -625,18 +777,22 @@ class BioyondWorkstation(WorkstationBase): self._set_workflow_mappings(bioyond_config["workflow_mappings"]) # 准备 HTTP 报送接收服务配置(延迟到 post_init 启动) - # 从 bioyond_config 中获取,如果没有则使用默认值 + # 从 bioyond_config 中的 http_service_config 获取 + http_service_cfg = bioyond_config.get("http_service_config", {}) self._http_service_config = { - "host": bioyond_config.get("http_service_host", bioyond_config.get("HTTP_host", "")), - "port": bioyond_config.get("http_service_port", bioyond_config.get("HTTP_port", 0)) + "host": http_service_cfg.get("http_service_host", "127.0.0.1"), + "port": http_service_cfg.get("http_service_port", 8080) } - self.http_service = None # 将在 post_init 中启动 + self.http_service = None # 将在 post_init 启动 + self.connection_monitor = None # 将在 post_init 启动 logger.info(f"Bioyond工作站初始化完成") def __del__(self): """析构函数:清理资源,停止 HTTP 服务""" try: + if hasattr(self, 'connection_monitor') and self.connection_monitor: + self.connection_monitor.stop() if hasattr(self, 'http_service') and self.http_service is not None: logger.info("正在停止 HTTP 报送服务...") self.http_service.stop() @@ -646,6 +802,13 @@ class BioyondWorkstation(WorkstationBase): def post_init(self, ros_node: ROS2WorkstationNode): self._ros_node = ros_node + # 启动连接监控 + try: + self.connection_monitor = ConnectionMonitor(self) + self.connection_monitor.start() + except Exception as e: + logger.error(f"启动连接监控失败: {e}") + # 启动 HTTP 报送接收服务(现在 device_id 已可用) # ⚠️ 检查子类是否已经自己管理 HTTP 服务 if self.bioyond_config.get("_disable_auto_http_service"): @@ -690,14 +853,14 @@ class BioyondWorkstation(WorkstationBase): def _create_communication_module(self, config: Optional[Dict[str, Any]] = None) -> None: """创建Bioyond通信模块""" - # 使用传入的 config 参数(来自 bioyond_config) - # 不再依赖全局变量 API_CONFIG 等 + # 直接使用传入的配置,不再使用默认值 + # 
所有配置必须从 JSON 文件中提供 if config: self.bioyond_config = config else: - # 如果没有传入配置,创建空配置(用于测试或兼容性) + # 如果没有配置,使用空字典(会导致后续错误,但这是预期的) self.bioyond_config = {} - + print("警告: 未提供 bioyond_config,请确保在 JSON 配置文件中提供完整配置") self.hardware_interface = BioyondV1RPC(self.bioyond_config) @@ -1011,7 +1174,15 @@ class BioyondWorkstation(WorkstationBase): workflow_id = self._get_workflow(actual_workflow_name) if workflow_id: - self.workflow_sequence.append(workflow_id) + # 兼容 BioyondReactionStation 中 workflow_sequence 被重写为 property 的情况 + if isinstance(self.workflow_sequence, list): + self.workflow_sequence.append(workflow_id) + elif hasattr(self, "_cached_workflow_sequence") and isinstance(self._cached_workflow_sequence, list): + self._cached_workflow_sequence.append(workflow_id) + else: + print(f"❌ 无法添加工作流: workflow_sequence 类型错误 {type(self.workflow_sequence)}") + return False + print(f"添加工作流到执行顺序: {actual_workflow_name} -> {workflow_id}") return True return False @@ -1212,6 +1383,22 @@ class BioyondWorkstation(WorkstationBase): # TODO: 根据实际业务需求处理步骤完成逻辑 # 例如:更新数据库、触发后续流程等 + # 发布任务状态事件 (running/progress update) + self._publish_task_status( + task_id=data.get('orderCode'), # 使用 OrderCode 作为关联 ID + task_code=data.get('orderCode'), + task_type="bioyond_step", + status="running", + progress=0.5, # 步骤完成视为任务进行中 + result={"step_name": data.get('stepName'), "step_id": data.get('stepId')} + ) + + # 更新物料信息 + # 步骤完成后,物料状态可能发生变化(如位置、用量等),触发同步 + logger.info(f"[步骤完成报送] 触发物料同步...") + self.resource_synchronizer.sync_from_external() + + return { "processed": True, "step_id": data.get('stepId'), @@ -1246,6 +1433,17 @@ class BioyondWorkstation(WorkstationBase): # TODO: 根据实际业务需求处理通量完成逻辑 + # 发布任务状态事件 + self._publish_task_status( + task_id=data.get('orderCode'), + task_code=data.get('orderCode'), + task_type="bioyond_sample", + status="running", + progress=0.7, + result={"sample_id": data.get('sampleId'), "status": status_desc} + ) + + return { "processed": True, "sample_id": data.get('sampleId'), @@ -1285,6 +1483,32 @@ class BioyondWorkstation(WorkstationBase): # TODO: 根据实际业务需求处理任务完成逻辑 # 例如:更新物料库存、生成报表等 + # 映射状态到事件状态 + event_status = "completed" + if str(data.get('status')) in ["-11", "-12"]: + event_status = "error" + elif str(data.get('status')) == "30": + event_status = "completed" + else: + event_status = "running" # 其他状态视为运行中(或根据实际定义) + + # 发布任务状态事件 + self._publish_task_status( + task_id=data.get('orderCode'), + task_code=data.get('orderCode'), + task_type="bioyond_order", + status=event_status, + progress=1.0 if event_status in ["completed", "error"] else 0.9, + result={"order_name": data.get('orderName'), "status": status_desc, "materials_count": len(used_materials)} + ) + + # 更新物料信息 + # 任务完成后,且状态为完成时,触发同步以更新最终物料状态 + if event_status == "completed": + logger.info(f"[任务完成报送] 触发物料同步...") + self.resource_synchronizer.sync_from_external() + + return { "processed": True, "order_code": data.get('orderCode'), diff --git a/unilabos/devices/workstation/workstation_http_service.py b/unilabos/devices/workstation/workstation_http_service.py index 11d8769..82fb6cb 100644 --- a/unilabos/devices/workstation/workstation_http_service.py +++ b/unilabos/devices/workstation/workstation_http_service.py @@ -459,12 +459,12 @@ class WorkstationHTTPHandler(BaseHTTPRequestHandler): # 验证必需字段 if 'brand' in request_data: if request_data['brand'] == "bioyond": # 奔曜 - error_msg = request_data["text"] - logger.info(f"收到奔曜错误处理报送: {error_msg}") + material_data = request_data["text"] + logger.info(f"收到奔曜物料变更报送: {material_data}") return HttpResponse( 
success=True, - message=f"错误处理报送已收到: {error_msg}", - acknowledgment_id=f"ERROR_{int(time.time() * 1000)}_{error_msg.get('action_id', 'unknown')}", + message=f"物料变更报送已收到: {material_data}", + acknowledgment_id=f"MATERIAL_{int(time.time() * 1000)}_{material_data.get('id', 'unknown')}", data=None ) else: diff --git a/unilabos/registry/devices/bioyond_dispensing_station.yaml b/unilabos/registry/devices/bioyond_dispensing_station.yaml index 9ae76b7..97b55cc 100644 --- a/unilabos/registry/devices/bioyond_dispensing_station.yaml +++ b/unilabos/registry/devices/bioyond_dispensing_station.yaml @@ -5,229 +5,6 @@ bioyond_dispensing_station: - bioyond_dispensing_station class: action_value_mappings: - auto-brief_step_parameters: - feedback: {} - goal: {} - goal_default: - data: null - handles: {} - placeholder_keys: {} - result: {} - schema: - description: '' - properties: - feedback: {} - goal: - properties: - data: - type: object - required: - - data - type: object - result: {} - required: - - goal - title: brief_step_parameters参数 - type: object - type: UniLabJsonCommand - auto-compute_experiment_design: - feedback: {} - goal: {} - goal_default: - m_tot: '70' - ratio: null - titration_percent: '0.03' - wt_percent: '0.25' - handles: {} - placeholder_keys: {} - result: {} - schema: - description: '' - properties: - feedback: {} - goal: - properties: - m_tot: - default: '70' - type: string - ratio: - type: object - titration_percent: - default: '0.03' - type: string - wt_percent: - default: '0.25' - type: string - required: - - ratio - type: object - result: - properties: - feeding_order: - items: {} - title: Feeding Order - type: array - return_info: - title: Return Info - type: string - solutions: - items: {} - title: Solutions - type: array - solvents: - additionalProperties: true - title: Solvents - type: object - titration: - additionalProperties: true - title: Titration - type: object - required: - - solutions - - titration - - solvents - - feeding_order - - return_info - title: ComputeExperimentDesignReturn - type: object - required: - - goal - title: compute_experiment_design参数 - type: object - type: UniLabJsonCommand - auto-process_order_finish_report: - feedback: {} - goal: {} - goal_default: - report_request: null - used_materials: null - handles: {} - placeholder_keys: {} - result: {} - schema: - description: '' - properties: - feedback: {} - goal: - properties: - report_request: - type: string - used_materials: - type: string - required: - - report_request - - used_materials - type: object - result: {} - required: - - goal - title: process_order_finish_report参数 - type: object - type: UniLabJsonCommand - auto-project_order_report: - feedback: {} - goal: {} - goal_default: - order_id: null - handles: {} - placeholder_keys: {} - result: {} - schema: - description: '' - properties: - feedback: {} - goal: - properties: - order_id: - type: string - required: - - order_id - type: object - result: {} - required: - - goal - title: project_order_report参数 - type: object - type: UniLabJsonCommand - auto-query_resource_by_name: - feedback: {} - goal: {} - goal_default: - material_name: null - handles: {} - placeholder_keys: {} - result: {} - schema: - description: '' - properties: - feedback: {} - goal: - properties: - material_name: - type: string - required: - - material_name - type: object - result: {} - required: - - goal - title: query_resource_by_name参数 - type: object - type: UniLabJsonCommand - auto-transfer_materials_to_reaction_station: - feedback: {} - goal: {} - goal_default: - 
target_device_id: null - transfer_groups: null - handles: {} - placeholder_keys: {} - result: {} - schema: - description: '' - properties: - feedback: {} - goal: - properties: - target_device_id: - type: string - transfer_groups: - type: array - required: - - target_device_id - - transfer_groups - type: object - result: {} - required: - - goal - title: transfer_materials_to_reaction_station参数 - type: object - type: UniLabJsonCommand - auto-workflow_sample_locations: - feedback: {} - goal: {} - goal_default: - workflow_id: null - handles: {} - placeholder_keys: {} - result: {} - schema: - description: '' - properties: - feedback: {} - goal: - properties: - workflow_id: - type: string - required: - - workflow_id - type: object - result: {} - required: - - goal - title: workflow_sample_locations参数 - type: object - type: UniLabJsonCommand batch_create_90_10_vial_feeding_tasks: feedback: {} goal: @@ -394,6 +171,99 @@ bioyond_dispensing_station: title: BatchCreateDiamineSolutionTasks type: object type: UniLabJsonCommand + compute_experiment_design: + feedback: {} + goal: + m_tot: m_tot + ratio: ratio + titration_percent: titration_percent + wt_percent: wt_percent + goal_default: + m_tot: '70' + ratio: '' + titration_percent: '0.03' + wt_percent: '0.25' + handles: + output: + - data_key: solutions + data_source: executor + data_type: array + handler_key: solutions + io_type: sink + label: Solution Data From Python + - data_key: titration + data_source: executor + data_type: object + handler_key: titration + io_type: sink + label: Titration Data From Calculation Node + - data_key: solvents + data_source: executor + data_type: object + handler_key: solvents + io_type: sink + label: Solvents Data From Calculation Node + - data_key: feeding_order + data_source: executor + data_type: array + handler_key: feeding_order + io_type: sink + label: Feeding Order Data From Calculation Node + result: + feeding_order: feeding_order + return_info: return_info + solutions: solutions + solvents: solvents + titration: titration + schema: + description: 计算实验设计,输出solutions/titration/solvents/feeding_order用于后续节点。 + properties: + feedback: {} + goal: + properties: + m_tot: + default: '70' + description: 总质量(g) + type: string + ratio: + description: 组分摩尔比的对象,保持输入顺序,如{"MDA":1,"BTDA":1} + type: string + titration_percent: + default: '0.03' + description: 滴定比例(10%部分) + type: string + wt_percent: + default: '0.25' + description: 目标固含质量分数 + type: string + required: + - ratio + type: object + result: + properties: + feeding_order: + type: array + return_info: + type: string + solutions: + type: array + solvents: + type: object + titration: + type: object + required: + - solutions + - titration + - solvents + - feeding_order + - return_info + title: ComputeExperimentDesign_Result + type: object + required: + - goal + title: ComputeExperimentDesign + type: object + type: UniLabJsonCommand create_90_10_vial_feeding_task: feedback: {} goal: @@ -620,6 +490,89 @@ bioyond_dispensing_station: title: DispenStationSolnPrep type: object type: DispenStationSolnPrep + scheduler_start: + feedback: {} + goal: {} + goal_default: {} + handles: {} + result: + return_info: return_info + schema: + description: 启动调度器 - 启动Bioyond配液站的任务调度器,开始执行队列中的任务 + properties: + feedback: {} + goal: + properties: {} + required: [] + type: object + result: + properties: + return_info: + description: 调度器启动结果,成功返回1,失败返回0 + type: integer + required: + - return_info + title: scheduler_start结果 + type: object + required: + - goal + title: scheduler_start参数 + type: 
object + type: UniLabJsonCommand + transfer_materials_to_reaction_station: + feedback: {} + goal: + target_device_id: target_device_id + transfer_groups: transfer_groups + goal_default: + target_device_id: '' + transfer_groups: '' + handles: {} + placeholder_keys: + target_device_id: unilabos_devices + result: {} + schema: + description: 将配液站完成的物料(溶液、样品等)转移到指定反应站的堆栈库位。支持配置多组转移任务,每组包含物料名称、目标堆栈和目标库位。 + properties: + feedback: {} + goal: + properties: + target_device_id: + description: 目标反应站设备ID(从设备列表中选择,所有转移组都使用同一个目标设备) + type: string + transfer_groups: + description: 转移任务组列表,每组包含物料名称、目标堆栈和目标库位,可以添加多组 + items: + properties: + materials: + description: 物料名称(手动输入,系统将通过RPC查询验证) + type: string + target_sites: + description: 目标库位(手动输入,如"A01") + type: string + target_stack: + description: 目标堆栈名称(从列表选择) + enum: + - 堆栈1左 + - 堆栈1右 + - 站内试剂存放堆栈 + type: string + required: + - materials + - target_stack + - target_sites + type: object + type: array + required: + - target_device_id + - transfer_groups + type: object + result: {} + required: + - goal + title: transfer_materials_to_reaction_station参数 + type: object + type: UniLabJsonCommand wait_for_multiple_orders_and_get_reports: feedback: {} goal: @@ -688,7 +641,7 @@ bioyond_dispensing_station: title: WaitForMultipleOrdersAndGetReports type: object type: UniLabJsonCommand - module: unilabos.devices.workstation.bioyond_studio.dispensing_station:BioyondDispensingStation + module: unilabos.devices.workstation.bioyond_studio.dispensing_station.dispensing_station:BioyondDispensingStation status_types: {} type: python config_info: [] @@ -699,15 +652,16 @@ bioyond_dispensing_station: config: properties: config: - type: string + type: object deck: type: string - required: - - config - - deck + protocol_type: + type: string + required: [] type: object data: properties: {} required: [] type: object + model: {} version: 1.0.0 diff --git a/unilabos/registry/devices/liquid_handler.yaml b/unilabos/registry/devices/liquid_handler.yaml index b0656d1..298eb70 100644 --- a/unilabos/registry/devices/liquid_handler.yaml +++ b/unilabos/registry/devices/liquid_handler.yaml @@ -9278,7 +9278,13 @@ liquid_handler.prcxi: z: 0.0 sample_id: '' type: '' - handles: {} + handles: + input: + - data_key: wells + data_source: handle + data_type: resource + handler_key: input_wells + label: InputWells placeholder_keys: wells: unilabos_resources result: {} diff --git a/unilabos/registry/devices/reaction_station_bioyond.yaml b/unilabos/registry/devices/reaction_station_bioyond.yaml index b7d10a6..cf46d7e 100644 --- a/unilabos/registry/devices/reaction_station_bioyond.yaml +++ b/unilabos/registry/devices/reaction_station_bioyond.yaml @@ -4,213 +4,88 @@ reaction_station.bioyond: - reaction_station_bioyond class: action_value_mappings: - auto-create_order: + add_time_constraint: feedback: {} - goal: {} + goal: + duration: duration + end_point: end_point + end_step_key: end_step_key + start_point: start_point + start_step_key: start_step_key goal_default: - json_str: null + duration: 0 + end_point: 0 + end_step_key: '' + start_point: 0 + start_step_key: '' handles: {} - placeholder_keys: {} result: {} schema: - description: '' + description: 添加时间约束 - 在两个工作流之间添加时间约束 properties: feedback: {} goal: properties: - json_str: - type: string - required: - - json_str - type: object - result: {} - required: - - goal - title: create_order参数 - type: object - type: UniLabJsonCommand - auto-hard_delete_merged_workflows: - feedback: {} - goal: {} - goal_default: - workflow_ids: null - handles: {} - 
placeholder_keys: {} - result: {} - schema: - description: '' - properties: - feedback: {} - goal: - properties: - workflow_ids: - items: - type: string - type: array - required: - - workflow_ids - type: object - result: {} - required: - - goal - title: hard_delete_merged_workflows参数 - type: object - type: UniLabJsonCommand - auto-merge_workflow_with_parameters: - feedback: {} - goal: {} - goal_default: - json_str: null - handles: {} - placeholder_keys: {} - result: {} - schema: - description: '' - properties: - feedback: {} - goal: - properties: - json_str: - type: string - required: - - json_str - type: object - result: {} - required: - - goal - title: merge_workflow_with_parameters参数 - type: object - type: UniLabJsonCommand - auto-process_temperature_cutoff_report: - feedback: {} - goal: {} - goal_default: - report_request: null - handles: {} - placeholder_keys: {} - result: {} - schema: - description: '' - properties: - feedback: {} - goal: - properties: - report_request: - type: string - required: - - report_request - type: object - result: {} - required: - - goal - title: process_temperature_cutoff_report参数 - type: object - type: UniLabJsonCommand - auto-process_web_workflows: - feedback: {} - goal: {} - goal_default: - web_workflow_json: null - handles: {} - placeholder_keys: {} - result: {} - schema: - description: '' - properties: - feedback: {} - goal: - properties: - web_workflow_json: - type: string - required: - - web_workflow_json - type: object - result: {} - required: - - goal - title: process_web_workflows参数 - type: object - type: UniLabJsonCommand - auto-skip_titration_steps: - feedback: {} - goal: {} - goal_default: - preintake_id: null - handles: {} - placeholder_keys: {} - result: {} - schema: - description: '' - properties: - feedback: {} - goal: - properties: - preintake_id: - type: string - required: - - preintake_id - type: object - result: {} - required: - - goal - title: skip_titration_steps参数 - type: object - type: UniLabJsonCommand - auto-wait_for_multiple_orders_and_get_reports: - feedback: {} - goal: {} - goal_default: - batch_create_result: null - check_interval: 10 - timeout: 7200 - handles: {} - placeholder_keys: {} - result: {} - schema: - description: '' - properties: - feedback: {} - goal: - properties: - batch_create_result: - type: string - check_interval: - default: 10 - type: integer - timeout: - default: 7200 + duration: + description: 时间(秒) type: integer + end_point: + default: Start + description: 终点计时点 (Start=开始前, End=结束后) + enum: + - Start + - End + type: string + end_step_key: + description: 终点步骤Key (可选, 默认为空则自动选择) + type: string + start_point: + default: Start + description: 起点计时点 (Start=开始前, End=结束后) + enum: + - Start + - End + type: string + start_step_key: + description: 起点步骤Key (例如 "feeding", "liquid", 可选, 默认为空则自动选择) + type: string + required: + - duration + type: object + result: {} + required: + - goal + title: add_time_constraint参数 + type: object + type: UniLabJsonCommand + clean_all_server_workflows: + feedback: {} + goal: {} + goal_default: {} + handles: {} + result: + code: code + message: message + schema: + description: 清空服务端所有非核心工作流 (保留核心流程) + properties: + feedback: {} + goal: + properties: {} required: [] type: object - result: {} - required: - - goal - title: wait_for_multiple_orders_and_get_reports参数 - type: object - type: UniLabJsonCommand - auto-workflow_step_query: - feedback: {} - goal: {} - goal_default: - workflow_id: null - handles: {} - placeholder_keys: {} - result: {} - schema: - description: '' - properties: - 
feedback: {} - goal: + result: properties: - workflow_id: + code: + description: 操作结果代码(1表示成功) + type: integer + message: + description: 结果描述 type: string - required: - - workflow_id type: object - result: {} required: - goal - title: workflow_step_query参数 + title: clean_all_server_workflows参数 type: object type: UniLabJsonCommand drip_back: @@ -247,13 +122,19 @@ reaction_station.bioyond: description: 观察时间(分钟) type: string titration_type: - description: 是否滴定(1=否, 2=是) + description: 是否滴定(NO=否, YES=是) + enum: + - 'NO' + - 'YES' type: string torque_variation: - description: 是否观察 (1=否, 2=是) + description: 是否观察 (NO=否, YES=是) + enum: + - 'NO' + - 'YES' type: string volume: - description: 分液公式(μL) + description: 分液公式(mL) type: string required: - volume @@ -353,13 +234,19 @@ reaction_station.bioyond: description: 观察时间(分钟) type: string titration_type: - description: 是否滴定(1=否, 2=是) + description: 是否滴定(NO=否, YES=是) + enum: + - 'NO' + - 'YES' type: string torque_variation: - description: 是否观察 (1=否, 2=是) + description: 是否观察 (NO=否, YES=是) + enum: + - 'NO' + - 'YES' type: string volume: - description: 分液公式(μL) + description: 分液公式(mL) type: string required: - volume @@ -403,7 +290,7 @@ reaction_station.bioyond: label: Solvents Data From Calculation Node result: {} schema: - description: 液体投料-溶剂。可以直接提供volume(μL),或通过solvents对象自动从additional_solvent(mL)计算volume。 + description: 液体投料-溶剂。可以直接提供volume(mL),或通过solvents对象自动从additional_solvent(mL)计算volume。 properties: feedback: {} goal: @@ -423,15 +310,21 @@ reaction_station.bioyond: description: 观察时间(分钟),默认360 type: string titration_type: - default: '1' - description: 是否滴定(1=否, 2=是),默认1 + default: 'NO' + description: 是否滴定(NO=否, YES=是),默认NO + enum: + - 'NO' + - 'YES' type: string torque_variation: - default: '2' - description: 是否观察 (1=否, 2=是),默认2 + default: 'YES' + description: 是否观察 (NO=否, YES=是),默认YES + enum: + - 'NO' + - 'YES' type: string volume: - description: 分液量(μL)。可直接提供,或通过solvents参数自动计算 + description: 分液量(mL)。可直接提供,或通过solvents参数自动计算 type: string required: - assign_material_name @@ -504,15 +397,21 @@ reaction_station.bioyond: description: 观察时间(分钟),默认90 type: string titration_type: - default: '2' - description: 是否滴定(1=否, 2=是),默认2 + default: 'YES' + description: 是否滴定(NO=否, YES=是),默认YES + enum: + - 'NO' + - 'YES' type: string torque_variation: - default: '2' - description: 是否观察 (1=否, 2=是),默认2 + default: 'YES' + description: 是否观察 (NO=否, YES=是),默认YES + enum: + - 'NO' + - 'YES' type: string volume_formula: - description: 分液公式(μL)。可直接提供固定公式,或留空由系统根据x_value、feeding_order_data、extracted_actuals自动生成 + description: 分液公式(mL)。可直接提供固定公式,或留空由系统根据x_value、feeding_order_data、extracted_actuals自动生成 type: string x_value: description: 公式中的x值,手工输入,格式为"{{1-2-3}}"(包含双花括号)。用于自动公式计算 @@ -560,13 +459,19 @@ reaction_station.bioyond: description: 观察时间(分钟) type: string titration_type: - description: 是否滴定(1=否, 2=是) + description: 是否滴定(NO=否, YES=是) + enum: + - 'NO' + - 'YES' type: string torque_variation: - description: 是否观察 (1=否, 2=是) + description: 是否观察 (NO=否, YES=是) + enum: + - 'NO' + - 'YES' type: string volume_formula: - description: 分液公式(μL) + description: 分液公式(mL) type: string required: - volume_formula @@ -680,6 +585,35 @@ reaction_station.bioyond: title: reactor_taken_out参数 type: object type: UniLabJsonCommand + scheduler_start: + feedback: {} + goal: {} + goal_default: {} + handles: {} + result: + return_info: return_info + schema: + description: 启动调度器 - 启动Bioyond工作站的任务调度器,开始执行队列中的任务 + properties: + feedback: {} + goal: + properties: {} + required: [] + type: object + result: + 
properties: + return_info: + description: 调度器启动结果,成功返回1,失败返回0 + type: integer + required: + - return_info + title: scheduler_start结果 + type: object + required: + - goal + title: scheduler_start参数 + type: object + type: UniLabJsonCommand solid_feeding_vials: feedback: {} goal: @@ -706,7 +640,11 @@ reaction_station.bioyond: description: 物料名称(用于获取试剂瓶位ID) type: string material_id: - description: 粉末类型ID,1=盐(21分钟),2=面粉(27分钟),3=BTDA(38分钟) + description: 粉末类型ID,Salt=盐(21分钟),Flour=面粉(27分钟),BTDA=BTDA(38分钟) + enum: + - Salt + - Flour + - BTDA type: string temperature: description: 温度设定(°C) @@ -715,7 +653,10 @@ reaction_station.bioyond: description: 观察时间(分钟) type: string torque_variation: - description: 是否观察 (1=否, 2=是) + description: 是否观察 (NO=否, YES=是) + enum: + - 'NO' + - 'YES' type: string required: - assign_material_name @@ -730,9 +671,19 @@ reaction_station.bioyond: title: solid_feeding_vials参数 type: object type: UniLabJsonCommand - module: unilabos.devices.workstation.bioyond_studio.reaction_station:BioyondReactionStation + module: unilabos.devices.workstation.bioyond_studio.reaction_station.reaction_station:BioyondReactionStation protocol_type: [] status_types: + average_viscosity: Float64 + force: Float64 + in_temperature: Float64 + out_temperature: Float64 + pt100_temperature: Float64 + sensor_average_temperature: Float64 + setting_temperature: Float64 + speed: Float64 + target_temperature: Float64 + viscosity: Float64 workflow_sequence: String type: python config_info: [] @@ -765,34 +716,19 @@ reaction_station.reactor: - reactor - reaction_station_bioyond class: - action_value_mappings: - auto-update_metrics: - feedback: {} - goal: {} - goal_default: - payload: null - handles: {} - placeholder_keys: {} - result: {} - schema: - description: '' - properties: - feedback: {} - goal: - properties: - payload: - type: object - required: - - payload - type: object - result: {} - required: - - goal - title: update_metrics参数 - type: object - type: UniLabJsonCommand - module: unilabos.devices.workstation.bioyond_studio.reaction_station:BioyondReactor - status_types: {} + action_value_mappings: {} + module: unilabos.devices.workstation.bioyond_studio.reaction_station.reaction_station:BioyondReactor + status_types: + average_viscosity: Float64 + force: Float64 + in_temperature: Float64 + out_temperature: Float64 + pt100_temperature: Float64 + sensor_average_temperature: Float64 + setting_temperature: Float64 + speed: Float64 + target_temperature: Float64 + viscosity: Float64 type: python config_info: [] description: 反应站子设备-反应器 diff --git a/unilabos/registry/registry.py b/unilabos/registry/registry.py index 20e0245..64aba3c 100644 --- a/unilabos/registry/registry.py +++ b/unilabos/registry/registry.py @@ -124,11 +124,25 @@ class Registry: "output": [ { "handler_key": "labware", - "label": "Labware", "data_type": "resource", - "data_source": "handle", - "data_key": "liquid", - } + "label": "Labware", + "data_source": "executor", + "data_key": "created_resource_tree.@flatten", + }, + { + "handler_key": "liquid_slots", + "data_type": "resource", + "label": "LiquidSlots", + "data_source": "executor", + "data_key": "liquid_input_resource_tree.@flatten", + }, + { + "handler_key": "materials", + "data_type": "resource", + "label": "AllMaterials", + "data_source": "executor", + "data_key": "[created_resource_tree,liquid_input_resource_tree].@flatten.@flatten", + }, ] }, "placeholder_keys": { @@ -186,7 +200,17 @@ class Registry: "resources": "unilabos_resources", }, "goal_default": {}, - "handles": {}, + "handles": 
{ + "input": [ + { + "handler_key": "input_resources", + "data_type": "resource", + "label": "InputResources", + "data_source": "handle", + "data_key": "resources", # 不为空 + }, + ] + }, }, }, }, diff --git a/unilabos/registry/resources/bioyond/bottles.yaml b/unilabos/registry/resources/bioyond/bottles.yaml index 79aa712..ecc5525 100644 --- a/unilabos/registry/resources/bioyond/bottles.yaml +++ b/unilabos/registry/resources/bioyond/bottles.yaml @@ -20,6 +20,17 @@ BIOYOND_PolymerStation_Liquid_Vial: icon: '' init_param_schema: {} version: 1.0.0 +BIOYOND_PolymerStation_Measurement_Vial: + category: + - bottles + class: + module: unilabos.resources.bioyond.bottles:BIOYOND_PolymerStation_Measurement_Vial + type: pylabrobot + description: 聚合站-测量小瓶(测密度) + handles: [] + icon: '' + init_param_schema: {} + version: 1.0.0 BIOYOND_PolymerStation_Reactor: category: - bottles diff --git a/unilabos/resources/bioyond/bottles.py b/unilabos/resources/bioyond/bottles.py index d60d65a..7045d8b 100644 --- a/unilabos/resources/bioyond/bottles.py +++ b/unilabos/resources/bioyond/bottles.py @@ -193,3 +193,20 @@ def BIOYOND_PolymerStation_Flask( barcode=barcode, model="BIOYOND_PolymerStation_Flask", ) + +def BIOYOND_PolymerStation_Measurement_Vial( + name: str, + diameter: float = 25.0, + height: float = 60.0, + max_volume: float = 20000.0, # 20mL + barcode: str = None, +) -> Bottle: + """创建测量小瓶""" + return Bottle( + name=name, + diameter=diameter, + height=height, + max_volume=max_volume, + barcode=barcode, + model="BIOYOND_PolymerStation_Measurement_Vial", + ) diff --git a/unilabos/resources/bioyond/decks.py b/unilabos/resources/bioyond/decks.py index 03c7d73..392b4a9 100644 --- a/unilabos/resources/bioyond/decks.py +++ b/unilabos/resources/bioyond/decks.py @@ -51,20 +51,17 @@ class BIOYOND_PolymerReactionStation_Deck(Deck): "测量小瓶仓库(测密度)": bioyond_warehouse_density_vial("测量小瓶仓库(测密度)"), # A01~B03 } self.warehouse_locations = { - "堆栈1左": Coordinate(0.0, 430.0, 0.0), # 左侧位置 - "堆栈1右": Coordinate(2500.0, 430.0, 0.0), # 右侧位置 - "站内试剂存放堆栈": Coordinate(640.0, 480.0, 0.0), + "堆栈1左": Coordinate(-200.0, 450.0, 0.0), # 左侧位置 + "堆栈1右": Coordinate(2350.0, 450.0, 0.0), # 右侧位置 + "站内试剂存放堆栈": Coordinate(730.0, 390.0, 0.0), # "移液站内10%分装液体准备仓库": Coordinate(1200.0, 600.0, 0.0), "站内Tip盒堆栈": Coordinate(300.0, 150.0, 0.0), - "测量小瓶仓库(测密度)": Coordinate(922.0, 552.0, 0.0), + "测量小瓶仓库(测密度)": Coordinate(940.0, 530.0, 0.0), } - self.warehouses["站内试剂存放堆栈"].rotation = Rotation(z=90) - self.warehouses["测量小瓶仓库(测密度)"].rotation = Rotation(z=270) for warehouse_name, warehouse in self.warehouses.items(): self.assign_child_resource(warehouse, location=self.warehouse_locations[warehouse_name]) - class BIOYOND_PolymerPreparationStation_Deck(Deck): def __init__( self, @@ -148,6 +145,7 @@ class BIOYOND_YB_Deck(Deck): for warehouse_name, warehouse in self.warehouses.items(): self.assign_child_resource(warehouse, location=self.warehouse_locations[warehouse_name]) + def YB_Deck(name: str) -> Deck: by=BIOYOND_YB_Deck(name=name) by.setup() diff --git a/unilabos/resources/bioyond/warehouses.py b/unilabos/resources/bioyond/warehouses.py index ae9e473..503dadb 100644 --- a/unilabos/resources/bioyond/warehouses.py +++ b/unilabos/resources/bioyond/warehouses.py @@ -46,41 +46,55 @@ def bioyond_warehouse_1x4x4_right(name: str) -> WareHouse: ) def bioyond_warehouse_density_vial(name: str) -> WareHouse: - """创建测量小瓶仓库(测密度) A01~B03""" + """创建测量小瓶仓库(测密度) - 竖向排列2列3行 + 布局(从下到上,从左到右): + | A03 | B03 | ← 顶部 + | A02 | B02 | ← 中部 + | A01 | B01 | ← 底部 + """ return warehouse_factory( 
name=name, - num_items_x=3, # 3列(01-03) - num_items_y=2, # 2行(A-B) + num_items_x=2, # 2列(A, B) + num_items_y=3, # 3行(01-03,从下到上) num_items_z=1, # 1层 dx=10.0, dy=10.0, dz=10.0, - item_dx=40.0, - item_dy=40.0, + item_dx=40.0, # 列间距(A到B的横向距离) + item_dy=40.0, # 行间距(01到02到03的竖向距离) item_dz=50.0, - # 用更小的 resource_size 来表现 "小点的孔位" + # ⭐ 竖向warehouse:槽位尺寸也是竖向的(小瓶已经是正方形,无需调整) resource_size_x=30.0, resource_size_y=30.0, resource_size_z=12.0, category="warehouse", col_offset=0, - layout="row-major", + layout="vertical-col-major", # ⭐ 竖向warehouse专用布局 ) def bioyond_warehouse_reagent_storage(name: str) -> WareHouse: - """创建BioYond站内试剂存放堆栈(A01~A02, 1行×2列)""" + """创建BioYond站内试剂存放堆栈 - 竖向排列1列2行 + 布局(竖向,从下到上): + | A02 | ← 顶部 + | A01 | ← 底部 + """ return warehouse_factory( name=name, - num_items_x=2, # 2列(01-02) - num_items_y=1, # 1行(A) + num_items_x=1, # 1列 + num_items_y=2, # 2行(01-02,从下到上) num_items_z=1, # 1层 dx=10.0, dy=10.0, dz=10.0, - item_dx=137.0, - item_dy=96.0, + item_dx=96.0, # 列间距(这里只有1列,不重要) + item_dy=137.0, # 行间距(A01到A02的竖向距离) item_dz=120.0, + # ⭐ 竖向warehouse:交换槽位尺寸,使槽位框也是竖向的 + resource_size_x=86.0, # 原来的 resource_size_y + resource_size_y=127.0, # 原来的 resource_size_x + resource_size_z=25.0, category="warehouse", + layout="vertical-col-major", # ⭐ 竖向warehouse专用布局 ) def bioyond_warehouse_tipbox_storage(name: str) -> WareHouse: diff --git a/unilabos/resources/container.py b/unilabos/resources/container.py index f977244..fe19bac 100644 --- a/unilabos/resources/container.py +++ b/unilabos/resources/container.py @@ -27,7 +27,7 @@ class RegularContainer(Container): def get_regular_container(name="container"): r = RegularContainer(name=name) r.category = "container" - return RegularContainer(name=name) + return r # # class RegularContainer(object): diff --git a/unilabos/resources/graphio.py b/unilabos/resources/graphio.py index faf0482..e46a4cf 100644 --- a/unilabos/resources/graphio.py +++ b/unilabos/resources/graphio.py @@ -779,6 +779,22 @@ def resource_bioyond_to_plr(bioyond_materials: list[dict], type_mapping: Dict[st if not locations: logger.debug(f"[物料位置] {unique_name} 没有location信息,跳过warehouse放置") + # ⭐ 预先检查:如果物料的任何location在竖向warehouse中,提前交换尺寸 + # 这样可以避免多个location时尺寸不一致的问题 + needs_size_swap = False + for loc in locations: + wh_name_check = loc.get("whName") + if wh_name_check in ["站内试剂存放堆栈", "测量小瓶仓库(测密度)"]: + needs_size_swap = True + break + + if needs_size_swap and hasattr(plr_material, 'size_x') and hasattr(plr_material, 'size_y'): + original_x = plr_material.size_x + original_y = plr_material.size_y + plr_material.size_x = original_y + plr_material.size_y = original_x + logger.debug(f" 物料 {unique_name} 将放入竖向warehouse,预先交换尺寸: {original_x}×{original_y} → {plr_material.size_x}×{plr_material.size_y}") + for loc in locations: wh_name = loc.get("whName") logger.debug(f"[物料位置] {unique_name} 尝试放置到 warehouse: {wh_name} (Bioyond坐标: x={loc.get('x')}, y={loc.get('y')}, z={loc.get('z')})") @@ -800,7 +816,6 @@ def resource_bioyond_to_plr(bioyond_materials: list[dict], type_mapping: Dict[st logger.debug(f"[Warehouse匹配] 找到warehouse: {wh_name} (容量: {warehouse.capacity}, 行×列: {warehouse.num_items_x}×{warehouse.num_items_y})") # Bioyond坐标映射 (重要!): x→行(1=A,2=B...), y→列(1=01,2=02...), z→层(通常=1) - # PyLabRobot warehouse是列优先存储: A01,B01,C01,D01, A02,B02,C02,D02, ... x = loc.get("x", 1) # 行号 (1-based: 1=A, 2=B, 3=C, 4=D) y = loc.get("y", 1) # 列号 (1-based: 1=01, 2=02, 3=03...) 
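# Worked example (illustrative): with the vertical-col-major formula in the branch below,
#   idx = layer_idx * (num_items_x * num_items_y) + row_idx * num_items_x + col_idx,
# a 2-column x 3-row density-vial warehouse maps Bioyond (x=2, y=3, z=1) to
# col_idx=1, row_idx=2, layer_idx=0, giving idx = 0*6 + 2*2 + 1 = 5,
# the last of its 6 slots.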
z = loc.get("z", 1) # 层号 (1-based, 通常为1) @@ -809,12 +824,23 @@ def resource_bioyond_to_plr(bioyond_materials: list[dict], type_mapping: Dict[st if wh_name == "堆栈1右": y = y - 4 # 将5-8映射到1-4 - # 特殊处理:对于1行×N列的横向warehouse(如站内试剂存放堆栈) - # Bioyond的y坐标表示线性位置序号,而不是列号 - if warehouse.num_items_y == 1: - # 1行warehouse: 直接用y作为线性索引 - idx = y - 1 - logger.debug(f"1行warehouse {wh_name}: y={y} → idx={idx}") + # 特殊处理竖向warehouse(站内试剂存放堆栈、测量小瓶仓库) + # 这些warehouse使用 vertical-col-major 布局 + if wh_name in ["站内试剂存放堆栈", "测量小瓶仓库(测密度)"]: + # vertical-col-major 布局的坐标映射: + # - Bioyond的x(1=A,2=B)对应warehouse的列(col, x方向) + # - Bioyond的y(1=01,2=02,3=03)对应warehouse的行(row, y方向),从下到上 + # vertical-col-major 中: row=0 对应底部,row=n-1 对应顶部 + # Bioyond y=1(01) 对应底部 → row=0, y=2(02) 对应中间 → row=1 + # 索引计算: idx = row * num_cols + col + col_idx = x - 1 # Bioyond的x(A,B) → col索引(0,1) + row_idx = y - 1 # Bioyond的y(01,02,03) → row索引(0,1,2) + layer_idx = z - 1 + + idx = layer_idx * (warehouse.num_items_x * warehouse.num_items_y) + row_idx * warehouse.num_items_x + col_idx + logger.debug(f"🔍 竖向warehouse {wh_name}: Bioyond(x={x},y={y},z={z}) → warehouse(col={col_idx},row={row_idx},layer={layer_idx}) → idx={idx}, capacity={warehouse.capacity}") + + # 普通横向warehouse的处理 else: # 多行warehouse: 根据 layout 使用不同的索引计算 row_idx = x - 1 # x表示行: 转为0-based @@ -838,6 +864,7 @@ def resource_bioyond_to_plr(bioyond_materials: list[dict], type_mapping: Dict[st if 0 <= idx < warehouse.capacity: if warehouse[idx] is None or isinstance(warehouse[idx], ResourceHolder): + # 物料尺寸已在放入warehouse前根据需要进行了交换 warehouse[idx] = plr_material logger.debug(f"✅ 物料 {unique_name} 放置到 {wh_name}[{idx}] (Bioyond坐标: x={loc.get('x')}, y={loc.get('y')})") else: @@ -1011,11 +1038,24 @@ def resource_plr_to_bioyond(plr_resources: list[ResourcePLR], type_mapping: dict logger.debug(f" 📭 [单瓶物料] {resource.name} 无液体,使用资源名: {material_name}") # 🎯 处理物料默认参数和单位 - # 检查是否有该物料名称的默认参数配置 + # 优先级: typeId参数 > 物料名称参数 > 默认值 default_unit = "个" # 默认单位 material_parameters = {} - if material_name in material_params: + # 1️⃣ 首先检查是否有 typeId 对应的参数配置(从 material_params 中获取,key 格式为 "type:") + type_params_key = f"type:{type_id}" + if type_params_key in material_params: + params_config = material_params[type_params_key].copy() + + # 提取 unit 字段(如果有) + if "unit" in params_config: + default_unit = params_config.pop("unit") # 从参数中移除,放到外层 + + # 剩余的字段放入 Parameters + material_parameters = params_config + logger.debug(f" 🔧 [物料参数-按typeId] 为 typeId={type_id[:8]}... 
应用配置: unit={default_unit}, parameters={material_parameters}") + # 2️⃣ 其次检查是否有该物料名称的默认参数配置 + elif material_name in material_params: params_config = material_params[material_name].copy() # 提取 unit 字段(如果有) @@ -1024,7 +1064,7 @@ def resource_plr_to_bioyond(plr_resources: list[ResourcePLR], type_mapping: dict # 剩余的字段放入 Parameters material_parameters = params_config - logger.debug(f" 🔧 [物料参数] 为 {material_name} 应用配置: unit={default_unit}, parameters={material_parameters}") + logger.debug(f" 🔧 [物料参数-按名称] 为 {material_name} 应用配置: unit={default_unit}, parameters={material_parameters}") # 转换为 JSON 字符串 parameters_json = json.dumps(material_parameters) if material_parameters else "{}" @@ -1151,11 +1191,7 @@ def initialize_resource(resource_config: dict, resource_type: Any = None) -> Uni if resource_class_config["type"] == "pylabrobot": resource_plr = RESOURCE(name=resource_config["name"]) if resource_type != ResourcePLR: - tree_sets = ResourceTreeSet.from_plr_resources([resource_plr]) - # r = resource_plr_to_ulab(resource_plr=resource_plr, parent_name=resource_config.get("parent", None)) - # # r = resource_plr_to_ulab(resource_plr=resource_plr) - # if resource_config.get("position") is not None: - # r["position"] = resource_config["position"] + tree_sets = ResourceTreeSet.from_plr_resources([resource_plr], known_newly_created=True) r = tree_sets.dump() else: r = resource_plr diff --git a/unilabos/resources/itemized_carrier.py b/unilabos/resources/itemized_carrier.py index 90c7ec0..fe55c39 100644 --- a/unilabos/resources/itemized_carrier.py +++ b/unilabos/resources/itemized_carrier.py @@ -50,12 +50,45 @@ class Bottle(Well): self.barcode = barcode def serialize(self) -> dict: + # Pylabrobot expects barcode to be an object with serialize(), but here it is a str. + # We temporarily unset it to avoid AttributeError in super().serialize(). 
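# Stash the string barcode, serialize with it unset, then restore it in the
# finally block so the instance is left unchanged even if serialization raises.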
+ _barcode = self.barcode + self.barcode = None + try: + data = super().serialize() + finally: + self.barcode = _barcode + return { - **super().serialize(), + **data, "diameter": self.diameter, "height": self.height, } + @classmethod + def deserialize(cls, data: dict, allow_marshal: bool = False): + # Extract barcode before calling parent deserialize to avoid type error + barcode_data = data.pop("barcode", None) + + # Call parent deserialize + instance = super(Bottle, cls).deserialize(data, allow_marshal=allow_marshal) + + # Set barcode as string (not as Barcode object) + if barcode_data: + if isinstance(barcode_data, str): + instance.barcode = barcode_data + elif isinstance(barcode_data, dict): + # If it's a dict (Barcode serialized format), extract the data field + instance.barcode = barcode_data.get("data", "") + else: + instance.barcode = "" + + # Set additional attributes + instance.diameter = data.get("diameter", instance._size_x) + instance.height = data.get("height", instance._size_z) + + return instance + T = TypeVar("T", bound=ResourceHolder) S = TypeVar("S", bound=ResourceHolder) diff --git a/unilabos/resources/resource_tracker.py b/unilabos/resources/resource_tracker.py index 610ba3d..4097782 100644 --- a/unilabos/resources/resource_tracker.py +++ b/unilabos/resources/resource_tracker.py @@ -1,7 +1,7 @@ import inspect import traceback import uuid -from pydantic import BaseModel, field_serializer, field_validator +from pydantic import BaseModel, field_serializer, field_validator, ValidationError from pydantic import Field from typing import List, Tuple, Any, Dict, Literal, Optional, cast, TYPE_CHECKING, Union @@ -147,20 +147,24 @@ class ResourceDictInstance(object): if not content.get("extra"): # MagicCode content["extra"] = {} if "position" in content: - pose = content.get("pose",{}) - if "position" not in pose : + pose = content.get("pose", {}) + if "position" not in pose: if "position" in content["position"]: pose["position"] = content["position"]["position"] else: pose["position"] = {"x": 0, "y": 0, "z": 0} if "size" not in pose: pose["size"] = { - "width": content["config"].get("size_x", 0), - "height": content["config"].get("size_y", 0), - "depth": content["config"].get("size_z", 0) + "width": content["config"].get("size_x", 0), + "height": content["config"].get("size_y", 0), + "depth": content["config"].get("size_z", 0), } content["pose"] = pose - return ResourceDictInstance(ResourceDict.model_validate(content)) + try: + res_dict = ResourceDict.model_validate(content) + return ResourceDictInstance(res_dict) + except ValidationError as err: + raise err def get_plr_nested_dict(self) -> Dict[str, Any]: """获取资源实例的嵌套字典表示""" @@ -322,7 +326,7 @@ class ResourceTreeSet(object): ) @classmethod - def from_plr_resources(cls, resources: List["PLRResource"]) -> "ResourceTreeSet": + def from_plr_resources(cls, resources: List["PLRResource"], known_newly_created=False) -> "ResourceTreeSet": """ 从plr资源创建ResourceTreeSet """ @@ -339,6 +343,8 @@ class ResourceTreeSet(object): } if source in replace_info: return replace_info[source] + elif source is None: + return "" else: print("转换pylabrobot的时候,出现未知类型", source) return source @@ -349,7 +355,8 @@ class ResourceTreeSet(object): if not uid: uid = str(uuid.uuid4()) res.unilabos_uuid = uid - logger.warning(f"{res}没有uuid,请设置后再传入,默认填充{uid}!\n{traceback.format_exc()}") + if not known_newly_created: + logger.warning(f"{res}没有uuid,请设置后再传入,默认填充{uid}!\n{traceback.format_exc()}") # 获取unilabos_extra,默认为空字典 extra = getattr(res, "unilabos_extra", {}) @@ 
-448,7 +455,13 @@ class ResourceTreeSet(object): from pylabrobot.utils.object_parsing import find_subclass # 类型映射 - TYPE_MAP = {"plate": "Plate", "well": "Well", "deck": "Deck", "container": "RegularContainer", "tip_spot": "TipSpot"} + TYPE_MAP = { + "plate": "Plate", + "well": "Well", + "deck": "Deck", + "container": "RegularContainer", + "tip_spot": "TipSpot", + } def collect_node_data(node: ResourceDictInstance, name_to_uuid: dict, all_states: dict, name_to_extra: dict): """一次遍历收集 name_to_uuid, all_states 和 name_to_extra""" @@ -608,6 +621,16 @@ class ResourceTreeSet(object): """ return [tree.root_node for tree in self.trees] + @property + def root_nodes_uuid(self) -> List[ResourceDictInstance]: + """ + 获取所有树的根节点 + + Returns: + 所有根节点的资源实例列表 + """ + return [tree.root_node.res_content.uuid for tree in self.trees] + @property def all_nodes(self) -> List[ResourceDictInstance]: """ @@ -918,6 +941,33 @@ class DeviceNodeResourceTracker(object): return self._traverse_and_process(resource, process) + def loop_find_with_uuid(self, resource, target_uuid: str): + """ + 递归遍历资源树,根据 uuid 查找并返回对应的资源 + + Args: + resource: 资源对象(可以是list、dict或实例) + target_uuid: 要查找的uuid + + Returns: + 找到的资源对象,未找到则返回None + """ + found_resource = None + + def process(res): + nonlocal found_resource + if found_resource is not None: + return 0 # 已找到,跳过后续处理 + current_uuid = self._get_resource_attr(res, "uuid", "unilabos_uuid") + if current_uuid and current_uuid == target_uuid: + found_resource = res + logger.trace(f"找到资源UUID: {target_uuid}") + return 1 + return 0 + + self._traverse_and_process(resource, process) + return found_resource + def loop_set_extra(self, resource, name_to_extra_map: Dict[str, dict]) -> int: """ 递归遍历资源树,根据 name 设置所有节点的 extra @@ -1103,7 +1153,7 @@ class DeviceNodeResourceTracker(object): for key in keys_to_remove: self.resource2parent_resource.pop(key, None) - logger.debug(f"成功移除资源: {resource}") + logger.trace(f"[ResourceTracker] 成功移除资源: {resource}") return True def clear_resource(self): diff --git a/unilabos/resources/warehouse.py b/unilabos/resources/warehouse.py index 3dbd6ad..929a4e4 100644 --- a/unilabos/resources/warehouse.py +++ b/unilabos/resources/warehouse.py @@ -43,6 +43,10 @@ def warehouse_factory( if layout == "row-major": # 行优先:row=0(A行) 应该显示在上方,需要较小的 y 值 y = dy + row * item_dy + elif layout == "vertical-col-major": + # 竖向warehouse: row=0 对应顶部(y小),row=n-1 对应底部(y大) + # 但标签 01 应该在底部,所以使用反向映射 + y = dy + (num_items_y - row - 1) * item_dy else: # 列优先:保持原逻辑(row=0 对应较大的 y) y = dy + (num_items_y - row - 1) * item_dy diff --git a/unilabos/ros/msgs/message_converter.py b/unilabos/ros/msgs/message_converter.py index e8570d3..632d5e1 100644 --- a/unilabos/ros/msgs/message_converter.py +++ b/unilabos/ros/msgs/message_converter.py @@ -159,10 +159,14 @@ _msg_converter: Dict[Type, Any] = { else Pose() ), config=json.dumps(x.get("config", {})), - data=json.dumps(x.get("data", {})), + data=json.dumps(obtain_data_with_uuid(x)), ), } +def obtain_data_with_uuid(x: dict): + data = x.get("data", {}) + data["unilabos_uuid"] = x.get("uuid", None) + return data def json_or_yaml_loads(data: str) -> Any: try: diff --git a/unilabos/ros/nodes/base_device_node.py b/unilabos/ros/nodes/base_device_node.py index 89c4d39..8a88023 100644 --- a/unilabos/ros/nodes/base_device_node.py +++ b/unilabos/ros/nodes/base_device_node.py @@ -430,11 +430,14 @@ class BaseROS2DeviceNode(Node, Generic[T]): }) tree_response: SerialCommand.Response = await client.call_async(request) uuid_maps = json.loads(tree_response.response) - 
self.resource_tracker.loop_update_uuid(input_resources, uuid_maps) + plr_instances = rts.to_plr_resources() + for plr_instance in plr_instances: + self.resource_tracker.loop_update_uuid(plr_instance, uuid_maps) + rts: ResourceTreeSet = ResourceTreeSet.from_plr_resources(plr_instances) self.lab_logger().info(f"Resource tree added. UUID mapping: {len(uuid_maps)} nodes") final_response = { - "created_resources": rts.dump(), - "liquid_input_resources": [], + "created_resource_tree": rts.dump(), + "liquid_input_resource_tree": [], } res.response = json.dumps(final_response) # 如果driver自己就有assign的方法,那就使用driver自己的assign方法 @@ -460,7 +463,7 @@ class BaseROS2DeviceNode(Node, Generic[T]): return res try: if len(rts.root_nodes) == 1 and parent_resource is not None: - plr_instance = rts.to_plr_resources()[0] + plr_instance = plr_instances[0] if isinstance(plr_instance, Plate): empty_liquid_info_in: List[Tuple[Optional[str], float]] = [(None, 0)] * plr_instance.num_items if len(ADD_LIQUID_TYPE) == 1 and len(LIQUID_VOLUME) == 1 and len(LIQUID_INPUT_SLOT) > 1: @@ -485,7 +488,7 @@ class BaseROS2DeviceNode(Node, Generic[T]): input_wells = [] for r in LIQUID_INPUT_SLOT: input_wells.append(plr_instance.children[r]) - final_response["liquid_input_resources"] = ResourceTreeSet.from_plr_resources(input_wells).dump() + final_response["liquid_input_resource_tree"] = ResourceTreeSet.from_plr_resources(input_wells).dump() res.response = json.dumps(final_response) if issubclass(parent_resource.__class__, Deck) and hasattr(parent_resource, "assign_child_at_slot") and "slot" in other_calling_param: other_calling_param["slot"] = int(other_calling_param["slot"]) @@ -619,7 +622,7 @@ class BaseROS2DeviceNode(Node, Generic[T]): ) # type: ignore raw_nodes = json.loads(response.response) tree_set = ResourceTreeSet.from_raw_dict_list(raw_nodes) - self.lab_logger().debug(f"获取资源结果: {len(tree_set.trees)} 个资源树") + self.lab_logger().trace(f"获取资源结果: {len(tree_set.trees)} 个资源树 {tree_set.root_nodes}") return tree_set async def get_resource_with_dir(self, resource_id: str, with_children: bool = True) -> "ResourcePLR": @@ -653,61 +656,71 @@ class BaseROS2DeviceNode(Node, Generic[T]): def transfer_to_new_resource( self, plr_resource: "ResourcePLR", tree: ResourceTreeInstance, additional_add_params: Dict[str, Any] - ): + ) -> Optional["ResourcePLR"]: parent_uuid = tree.root_node.res_content.parent_uuid - if parent_uuid: - parent_resource: ResourcePLR = self.resource_tracker.uuid_to_resources.get(parent_uuid) - if parent_resource is None: + if not parent_uuid: + self.lab_logger().warning( + f"物料{plr_resource} parent未知,挂载到当前节点下,额外参数:{additional_add_params}" + ) + return None + if parent_uuid == self.uuid: + self.lab_logger().warning( + f"物料{plr_resource}请求挂载到{self.identifier},额外参数:{additional_add_params}" + ) + return None + parent_resource: ResourcePLR = self.resource_tracker.uuid_to_resources.get(parent_uuid) + if parent_resource is None: + self.lab_logger().warning( + f"物料{plr_resource}请求挂载{tree.root_node.res_content.name}的父节点{parent_uuid}不存在" + ) + else: + try: + # 特殊兼容所有plr的物料的assign方法,和create_resource append_resource后期同步 + additional_params = {} + extra = getattr(plr_resource, "unilabos_extra", {}) + if len(extra): + self.lab_logger().info(f"发现物料{plr_resource}额外参数: " + str(extra)) + if "update_resource_site" in extra: + additional_add_params["site"] = extra["update_resource_site"] + site = additional_add_params.get("site", None) + spec = inspect.signature(parent_resource.assign_child_resource) + if "spot" in spec.parameters: + 
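On the consumer side, the renamed keys mean the create-resource reply now has roughly this shape (values abbreviated; key names taken from the code above):

    payload = json.loads(res.response)
    created_trees = payload["created_resource_tree"]        # ResourceTreeSet.dump() of the re-serialized instances
    liquid_inputs = payload["liquid_input_resource_tree"]   # empty unless Plate liquid-input slots were provided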
ordering_dict: Dict[str, Any] = getattr(parent_resource, "_ordering") + if ordering_dict: + site = list(ordering_dict.keys()).index(site) + additional_params["spot"] = site + old_parent = plr_resource.parent + if old_parent is not None: + # plr并不支持同一个deck的加载和卸载 + self.lab_logger().warning(f"物料{plr_resource}请求从{old_parent}卸载") + old_parent.unassign_child_resource(plr_resource) self.lab_logger().warning( - f"物料{plr_resource}请求挂载{tree.root_node.res_content.name}的父节点{parent_uuid}不存在" + f"物料{plr_resource}请求挂载到{parent_resource},额外参数:{additional_params}" ) - else: - try: - # 特殊兼容所有plr的物料的assign方法,和create_resource append_resource后期同步 - additional_params = {} - extra = getattr(plr_resource, "unilabos_extra", {}) - if len(extra): - self.lab_logger().info(f"发现物料{plr_resource}额外参数: " + str(extra)) - if "update_resource_site" in extra: - additional_add_params["site"] = extra["update_resource_site"] - site = additional_add_params.get("site", None) - spec = inspect.signature(parent_resource.assign_child_resource) - if "spot" in spec.parameters: - ordering_dict: Dict[str, Any] = getattr(parent_resource, "_ordering") - if ordering_dict: - site = list(ordering_dict.keys()).index(site) - additional_params["spot"] = site - old_parent = plr_resource.parent - if old_parent is not None: - # plr并不支持同一个deck的加载和卸载 - self.lab_logger().warning(f"物料{plr_resource}请求从{old_parent}卸载") - old_parent.unassign_child_resource(plr_resource) - self.lab_logger().warning( - f"物料{plr_resource}请求挂载到{parent_resource},额外参数:{additional_params}" - ) - # ⭐ assign 之前,需要从 resources 列表中移除 - # 因为资源将不再是顶级资源,而是成为 parent_resource 的子资源 - # 如果不移除,figure_resource 会找到两次:一次在 resources,一次在 parent 的 children - resource_id = id(plr_resource) - for i, r in enumerate(self.resource_tracker.resources): - if id(r) == resource_id: - self.resource_tracker.resources.pop(i) - self.lab_logger().debug( - f"从顶级资源列表中移除 {plr_resource.name}(即将成为 {parent_resource.name} 的子资源)" - ) - break + # ⭐ assign 之前,需要从 resources 列表中移除 + # 因为资源将不再是顶级资源,而是成为 parent_resource 的子资源 + # 如果不移除,figure_resource 会找到两次:一次在 resources,一次在 parent 的 children + resource_id = id(plr_resource) + for i, r in enumerate(self.resource_tracker.resources): + if id(r) == resource_id: + self.resource_tracker.resources.pop(i) + self.lab_logger().debug( + f"从顶级资源列表中移除 {plr_resource.name}(即将成为 {parent_resource.name} 的子资源)" + ) + break - parent_resource.assign_child_resource(plr_resource, location=None, **additional_params) + parent_resource.assign_child_resource(plr_resource, location=None, **additional_params) - func = getattr(self.driver_instance, "resource_tree_transfer", None) - if callable(func): - # 分别是 物料的原来父节点,当前物料的状态,物料的新父节点(此时物料已经重新assign了) - func(old_parent, plr_resource, parent_resource) - except Exception as e: - self.lab_logger().warning( - f"物料{plr_resource}请求挂载{tree.root_node.res_content.name}的父节点{parent_resource}[{parent_uuid}]失败!\n{traceback.format_exc()}" - ) + func = getattr(self.driver_instance, "resource_tree_transfer", None) + if callable(func): + # 分别是 物料的原来父节点,当前物料的状态,物料的新父节点(此时物料已经重新assign了) + func(old_parent, plr_resource, parent_resource) + return parent_resource + except Exception as e: + self.lab_logger().warning( + f"物料{plr_resource}请求挂载{tree.root_node.res_content.name}的父节点{parent_resource}[{parent_uuid}]失败!\n{traceback.format_exc()}" + ) async def s2c_resource_tree(self, req: SerialCommand_Request, res: SerialCommand_Response): """ @@ -722,7 +735,7 @@ class BaseROS2DeviceNode(Node, Generic[T]): def _handle_add( plr_resources: List[ResourcePLR], tree_set: ResourceTreeSet, 
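The spot resolution above turns a named site into a positional index by looking it up in the parent's _ordering keys; a minimal sketch with a hypothetical ordering:

    ordering = {"A1": None, "A2": None, "B1": None}   # stand-in for parent_resource._ordering
    site = "A2"
    spot = list(ordering.keys()).index(site)          # -> 1, then passed as assign_child_resource(..., spot=1)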
additional_add_params: Dict[str, Any] - ) -> Dict[str, Any]: + ) -> Tuple[Dict[str, Any], List[ResourcePLR]]: """ 处理资源添加操作的内部函数 @@ -734,15 +747,20 @@ class BaseROS2DeviceNode(Node, Generic[T]): Returns: 操作结果字典 """ + parents = [] # 放的是被变更的物料 / 被变更的物料父级 for plr_resource, tree in zip(plr_resources, tree_set.trees): self.resource_tracker.add_resource(plr_resource) - self.transfer_to_new_resource(plr_resource, tree, additional_add_params) + parent = self.transfer_to_new_resource(plr_resource, tree, additional_add_params) + if parent is not None: + parents.append(parent) + else: + parents.append(plr_resource) func = getattr(self.driver_instance, "resource_tree_add", None) if callable(func): func(plr_resources) - return {"success": True, "action": "add"} + return {"success": True, "action": "add"}, parents def _handle_remove(resources_uuid: List[str]) -> Dict[str, Any]: """ @@ -777,11 +795,11 @@ class BaseROS2DeviceNode(Node, Generic[T]): if plr_resource.parent is not None: plr_resource.parent.unassign_child_resource(plr_resource) self.resource_tracker.remove_resource(plr_resource) - self.lab_logger().info(f"移除物料 {plr_resource} 及其子节点") + self.lab_logger().info(f"[资源同步] 移除物料 {plr_resource} 及其子节点") for other_plr_resource in other_plr_resources: self.resource_tracker.remove_resource(other_plr_resource) - self.lab_logger().info(f"移除物料 {other_plr_resource} 及其子节点") + self.lab_logger().info(f"[资源同步] 移除物料 {other_plr_resource} 及其子节点") return { "success": True, @@ -813,11 +831,16 @@ class BaseROS2DeviceNode(Node, Generic[T]): original_instance: ResourcePLR = self.resource_tracker.figure_resource( {"uuid": tree.root_node.res_content.uuid}, try_mode=False ) + original_parent_resource = original_instance.parent + original_parent_resource_uuid = getattr(original_parent_resource, "unilabos_uuid", None) + target_parent_resource_uuid = tree.root_node.res_content.uuid_parent + not_same_parent = original_parent_resource_uuid != target_parent_resource_uuid and original_parent_resource is not None + old_name = original_instance.name + new_name = plr_resource.name + parent_appended = False - # Update操作中包含改名:需要先remove再add - if original_instance.name != plr_resource.name: - old_name = original_instance.name - new_name = plr_resource.name + # Update操作中包含改名:需要先remove再add,这里更新父节点即可 + if not not_same_parent and old_name != new_name: self.lab_logger().info(f"物料改名操作:{old_name} -> {new_name}") # 收集所有相关的uuid(包括子节点) @@ -826,12 +849,10 @@ class BaseROS2DeviceNode(Node, Generic[T]): _handle_add([original_instance], tree_set, additional_add_params) self.lab_logger().info(f"物料改名完成:{old_name} -> {new_name}") + original_instances.append(original_parent_resource) + parent_appended = True # 常规更新:不涉及改名 - original_parent_resource = original_instance.parent - original_parent_resource_uuid = getattr(original_parent_resource, "unilabos_uuid", None) - target_parent_resource_uuid = tree.root_node.res_content.uuid_parent - self.lab_logger().info( f"物料{original_instance} 原始父节点{original_parent_resource_uuid} " f"目标父节点{target_parent_resource_uuid} 更新" @@ -842,13 +863,12 @@ class BaseROS2DeviceNode(Node, Generic[T]): original_instance.unilabos_extra = getattr(plr_resource, "unilabos_extra") # type: ignore # noqa: E501 # 如果父节点变化,需要重新挂载 - if ( - original_parent_resource_uuid != target_parent_resource_uuid - and original_parent_resource is not None - ): - self.transfer_to_new_resource(original_instance, tree, additional_add_params) + if not_same_parent: + parent = self.transfer_to_new_resource(original_instance, tree, additional_add_params) + 
original_instances.append(parent) + parent_appended = True else: - # 判断是否变更了resource_site + # 判断是否变更了resource_site,重新登记 target_site = original_instance.unilabos_extra.get("update_resource_site") sites = original_instance.parent.sites if original_instance.parent is not None and hasattr(original_instance.parent, "sites") else None site_names = list(original_instance.parent._ordering.keys()) if original_instance.parent is not None and hasattr(original_instance.parent, "sites") else [] @@ -856,7 +876,10 @@ class BaseROS2DeviceNode(Node, Generic[T]): site_index = sites.index(original_instance) site_name = site_names[site_index] if site_name != target_site: - self.transfer_to_new_resource(original_instance, tree, additional_add_params) + parent = self.transfer_to_new_resource(original_instance, tree, additional_add_params) + if parent is not None: + original_instances.append(parent) + parent_appended = True # 加载状态 original_instance.load_all_state(states) @@ -864,7 +887,8 @@ class BaseROS2DeviceNode(Node, Generic[T]): self.lab_logger().info( f"更新了资源属性 {plr_resource}[{tree.root_node.res_content.uuid}] " f"及其子节点 {child_count} 个" ) - original_instances.append(original_instance) + if not parent_appended: + original_instances.append(original_instance) # 调用driver的update回调 func = getattr(self.driver_instance, "resource_tree_update", None) @@ -881,8 +905,8 @@ class BaseROS2DeviceNode(Node, Generic[T]): action = i.get("action") # remove, add, update resources_uuid: List[str] = i.get("data") # 资源数据 additional_add_params = i.get("additional_add_params", {}) # 额外参数 - self.lab_logger().info( - f"[Resource Tree Update] Processing {action} operation, " f"resources count: {len(resources_uuid)}" + self.lab_logger().trace( + f"[资源同步] 处理 {action}, " f"resources count: {len(resources_uuid)}" ) tree_set = None if action in ["add", "update"]: @@ -894,8 +918,20 @@ class BaseROS2DeviceNode(Node, Generic[T]): if tree_set is None: raise ValueError("tree_set不能为None") plr_resources = tree_set.to_plr_resources() - result = _handle_add(plr_resources, tree_set, additional_add_params) - new_tree_set = ResourceTreeSet.from_plr_resources(plr_resources) + result, parents = _handle_add(plr_resources, tree_set, additional_add_params) + parents: List[Optional["ResourcePLR"]] = [i for i in parents if i is not None] + # de_dupe_parents = list(set(parents)) + # Fix unhashable type error for WareHouse + de_dupe_parents = [] + _seen_ids = set() + for p in parents: + if id(p) not in _seen_ids: + _seen_ids.add(id(p)) + de_dupe_parents.append(p) + new_tree_set = ResourceTreeSet.from_plr_resources(de_dupe_parents) # 去重 + for tree in new_tree_set.trees: + if tree.root_node.res_content.uuid_parent is None and self.node_name != "host_node": + tree.root_node.res_content.parent_uuid = self.uuid r = SerialCommand.Request() r.command = json.dumps( {"data": {"data": new_tree_set.dump()}, "action": "update"}) # 和Update Resource一致 @@ -914,7 +950,10 @@ class BaseROS2DeviceNode(Node, Generic[T]): plr_resources.append(ResourceTreeSet([tree]).to_plr_resources()[0]) result, original_instances = _handle_update(plr_resources, tree_set, additional_add_params) if not BasicConfig.no_update_feedback: - new_tree_set = ResourceTreeSet.from_plr_resources(original_instances) + new_tree_set = ResourceTreeSet.from_plr_resources(original_instances) # 去重 + for tree in new_tree_set.trees: + if tree.root_node.res_content.uuid_parent is None and self.node_name != "host_node": + tree.root_node.res_content.parent_uuid = self.uuid r = SerialCommand.Request() r.command = 
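Since some PLR containers (the WareHouse case noted in the comment) are unhashable, the code above deduplicates by object identity rather than set(); the same pattern in isolation:

    seen_ids = set()
    unique = []
    for obj in parents:              # any list that may contain repeated (possibly unhashable) objects
        if id(obj) not in seen_ids:
            seen_ids.add(id(obj))
            unique.append(obj)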
json.dumps( {"data": {"data": new_tree_set.dump()}, "action": "update"}) # 和Update Resource一致 @@ -934,15 +973,15 @@ class BaseROS2DeviceNode(Node, Generic[T]): # 返回处理结果 result_json = {"results": results, "total": len(data)} res.response = json.dumps(result_json, ensure_ascii=False, cls=TypeEncoder) - self.lab_logger().info(f"[Resource Tree Update] Completed processing {len(data)} operations") + # self.lab_logger().info(f"[Resource Tree Update] Completed processing {len(data)} operations") except json.JSONDecodeError as e: error_msg = f"Invalid JSON format: {str(e)}" - self.lab_logger().error(f"[Resource Tree Update] {error_msg}") + self.lab_logger().error(f"[资源同步] {error_msg}") res.response = json.dumps({"success": False, "error": error_msg}, ensure_ascii=False) except Exception as e: error_msg = f"Unexpected error: {str(e)}" - self.lab_logger().error(f"[Resource Tree Update] {error_msg}") + self.lab_logger().error(f"[资源同步] {error_msg}") self.lab_logger().error(traceback.format_exc()) res.response = json.dumps({"success": False, "error": error_msg}, ensure_ascii=False) @@ -1263,7 +1302,8 @@ class BaseROS2DeviceNode(Node, Generic[T]): ACTION, action_paramtypes = self.get_real_function(self.driver_instance, action_name) action_kwargs = convert_from_ros_msg_with_mapping(goal, action_value_mapping["goal"]) - self.lab_logger().debug(f"任务 {ACTION.__name__} 接收到原始目标: {action_kwargs}") + self.lab_logger().debug(f"任务 {ACTION.__name__} 接收到原始目标: {str(action_kwargs)[:1000]}") + self.lab_logger().trace(f"任务 {ACTION.__name__} 接收到原始目标: {action_kwargs}") error_skip = False # 向Host查询物料当前状态,如果是host本身的增加物料的请求,则直接跳过 if action_name not in ["create_resource_detailed", "create_resource"]: @@ -1279,9 +1319,14 @@ class BaseROS2DeviceNode(Node, Generic[T]): # 批量查询资源 queried_resources = [] for resource_data in resource_inputs: - plr_resource = await self.get_resource_with_dir( - resource_id=resource_data["id"], with_children=True - ) + unilabos_uuid = resource_data.get("data", {}).get("unilabos_uuid") + if unilabos_uuid is None: + plr_resource = await self.get_resource_with_dir( + resource_id=resource_data["id"], with_children=True + ) + else: + resource_tree = await self.get_resource([unilabos_uuid]) + plr_resource = resource_tree.to_plr_resources()[0] if "sample_id" in resource_data: plr_resource.unilabos_extra["sample_uuid"] = resource_data["sample_id"] queried_resources.append(plr_resource) @@ -1330,9 +1375,8 @@ class BaseROS2DeviceNode(Node, Generic[T]): execution_success = True except Exception as _: execution_error = traceback.format_exc() - error( - f"异步任务 {ACTION.__name__} 报错了\n{traceback.format_exc()}\n原始输入:{action_kwargs}" - ) + error(f"异步任务 {ACTION.__name__} 报错了\n{traceback.format_exc()}\n原始输入:{str(action_kwargs)[:1000]}") + trace(f"异步任务 {ACTION.__name__} 报错了\n{traceback.format_exc()}\n原始输入:{action_kwargs}") future = ROS2DeviceNode.run_async_func(ACTION, trace_error=False, **action_kwargs) future.add_done_callback(_handle_future_exception) @@ -1352,8 +1396,9 @@ class BaseROS2DeviceNode(Node, Generic[T]): except Exception as _: execution_error = traceback.format_exc() error( - f"同步任务 {ACTION.__name__} 报错了\n{traceback.format_exc()}\n原始输入:{action_kwargs}" - ) + f"同步任务 {ACTION.__name__} 报错了\n{traceback.format_exc()}\n原始输入:{str(action_kwargs)[:1000]}") + trace( + f"同步任务 {ACTION.__name__} 报错了\n{traceback.format_exc()}\n原始输入:{action_kwargs}") future.add_done_callback(_handle_future_exception) @@ -1421,7 +1466,7 @@ class BaseROS2DeviceNode(Node, Generic[T]): for r in rs: res = 
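For reference, the feedback request assembled above wraps the dumped tree set in the same two-level envelope as the existing Update Resource path; a sketch of the shape (not a definitive wire format):

    request = SerialCommand.Request()
    request.command = json.dumps({
        "action": "update",
        "data": {"data": new_tree_set.dump()},   # outer "data" is the request body, inner "data" the tree list
    })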
self.resource_tracker.parent_resource(r) # 获取 resource 对象 else: - res = self.resource_tracker.parent_resource(r) + res = self.resource_tracker.parent_resource(rs) if id(res) not in seen: seen.add(id(res)) unique_resources.append(res) @@ -1497,8 +1542,7 @@ class BaseROS2DeviceNode(Node, Generic[T]): resource_data = function_args[arg_name] if isinstance(resource_data, dict) and "id" in resource_data: try: - converted_resource = self._convert_resource_sync(resource_data) - function_args[arg_name] = converted_resource + function_args[arg_name] = self._convert_resources_sync(resource_data["uuid"])[0] except Exception as e: self.lab_logger().error( f"转换ResourceSlot参数 {arg_name} 失败: {e}\n{traceback.format_exc()}" @@ -1512,12 +1556,8 @@ class BaseROS2DeviceNode(Node, Generic[T]): resource_list = function_args[arg_name] if isinstance(resource_list, list): try: - converted_resources = [] - for resource_data in resource_list: - if isinstance(resource_data, dict) and "id" in resource_data: - converted_resource = self._convert_resource_sync(resource_data) - converted_resources.append(converted_resource) - function_args[arg_name] = converted_resources + uuids = [r["uuid"] for r in resource_list if isinstance(r, dict) and "id" in r] + function_args[arg_name] = self._convert_resources_sync(*uuids) if uuids else [] except Exception as e: self.lab_logger().error( f"转换ResourceSlot列表参数 {arg_name} 失败: {e}\n{traceback.format_exc()}" @@ -1530,20 +1570,27 @@ class BaseROS2DeviceNode(Node, Generic[T]): f"执行动作时JSON缺少function_name或function_args: {ex}\n原JSON: {string}\n{traceback.format_exc()}" ) - def _convert_resource_sync(self, resource_data: Dict[str, Any]): - """同步转换资源数据为实例""" - # 创建资源查询请求 - r = SerialCommand.Request() - r.command = json.dumps( - { - "id": resource_data.get("id", None), - "uuid": resource_data.get("uuid", None), - "with_children": True, - } - ) + def _convert_resources_sync(self, *uuids: str) -> List["ResourcePLR"]: + """同步转换资源 UUID 为实例 - # 同步调用资源查询服务 - future = self._resource_clients["resource_get"].call_async(r) + Args: + *uuids: 一个或多个资源 UUID + + Returns: + 单个 UUID 时返回单个资源实例,多个 UUID 时返回资源实例列表 + """ + if not uuids: + raise ValueError("至少需要提供一个 UUID") + + uuids_list = list(uuids) + future = self._resource_clients["c2s_update_resource_tree"].call_async(SerialCommand.Request( + command=json.dumps( + { + "data": {"data": uuids_list, "with_children": True}, + "action": "get", + } + ) + )) # 等待结果(使用while循环,每次sleep 0.05秒,最多等待30秒) timeout = 30.0 @@ -1553,27 +1600,40 @@ class BaseROS2DeviceNode(Node, Generic[T]): elapsed += 0.05 if not future.done(): - raise Exception(f"资源查询超时: {resource_data}") + raise Exception(f"资源查询超时: {uuids_list}") response = future.result() if response is None: - raise Exception(f"资源查询返回空结果: {resource_data}") + raise Exception(f"资源查询返回空结果: {uuids_list}") raw_data = json.loads(response.response) # 转换为 PLR 资源 tree_set = ResourceTreeSet.from_raw_dict_list(raw_data) - plr_resource = tree_set.to_plr_resources()[0] + if not len(tree_set.trees): + raise Exception(f"资源查询返回空树: {raw_data}") + plr_resources = tree_set.to_plr_resources() # 通过资源跟踪器获取本地实例 - res = self.resource_tracker.figure_resource(plr_resource, try_mode=True) - if len(res) == 0: - self.lab_logger().warning(f"资源转换未能索引到实例: {resource_data},返回新建实例") - return plr_resource - elif len(res) == 1: - return res[0] - else: - raise ValueError(f"资源转换得到多个实例: {res}") + figured_resources: List[ResourcePLR] = [] + for plr_resource, tree in zip(plr_resources, tree_set.trees): + res = self.resource_tracker.figure_resource(plr_resource, 
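The batch lookup above sends a single "get" action covering all requested UUIDs; the command payload looks roughly like this (UUID values illustrative):

    command = json.dumps({
        "action": "get",
        "data": {"data": ["ab12...", "cd34..."], "with_children": True},
    })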
try_mode=True)
+            if len(res) == 0:
+                self.lab_logger().warning(f"资源转换未能索引到实例: {tree.root_node.res_content},返回新建实例")
+                figured_resources.append(plr_resource)
+            elif len(res) == 1:
+                figured_resources.append(res[0])
+            else:
+                raise ValueError(f"资源转换得到多个实例: {res}")
+
+        mapped_plr_resources = []
+        for target_uuid in uuids_list:
+            found = None
+            for plr_resource in figured_resources:
+                found = self.resource_tracker.loop_find_with_uuid(plr_resource, target_uuid)
+                if found is not None:
+                    break
+            # 未找到时保留 None,保证返回列表与请求的 uuid 一一对应
+            mapped_plr_resources.append(found)
+
+        return mapped_plr_resources
 
     async def _execute_driver_command_async(self, string: str):
         try:
diff --git a/unilabos/ros/nodes/presets/host_node.py b/unilabos/ros/nodes/presets/host_node.py
index dffd204..101476a 100644
--- a/unilabos/ros/nodes/presets/host_node.py
+++ b/unilabos/ros/nodes/presets/host_node.py
@@ -23,6 +23,7 @@ from unilabos_msgs.srv._serial_command import SerialCommand_Request, SerialComma
 from unique_identifier_msgs.msg import UUID
 
 from unilabos.registry.registry import lab_registry
+from unilabos.resources.container import RegularContainer
 from unilabos.resources.graphio import initialize_resource
 from unilabos.resources.registry import add_schema
 from unilabos.ros.initialize_device import initialize_device_from_dict
@@ -361,8 +362,7 @@ class HostNode(BaseROS2DeviceNode):
                 request.command = ""
                 future = sclient.call_async(request)
                 # Use timeout for result as well
-                future.result(timeout_sec=5.0)
-                self.lab_logger().debug(f"[Host Node] Re-register completed for {device_namespace}")
+                future.result()
             except Exception as e:
                 # Gracefully handle destruction during shutdown
                 if "destruction was requested" in str(e) or self._shutting_down:
@@ -586,11 +586,10 @@ class HostNode(BaseROS2DeviceNode):
         )
 
         try:
-            new_li = []
+            assert len(response) == 1, "Create Resource应当只返回一个结果"
             for i in response:
                 res = json.loads(i)
-                new_li.append(res)
-            return {"resources": new_li, "liquid_input_resources": new_li}
+                return res
         except Exception as ex:
             pass
         _n = "\n"
@@ -795,7 +794,8 @@ class HostNode(BaseROS2DeviceNode):
             assign_sample_id(action_kwargs)
 
         goal_msg = convert_to_ros_msg(action_client._action_type.Goal(), action_kwargs)
-        self.lab_logger().info(f"[Host Node] Sending goal for {action_id}: {goal_msg}")
+        self.lab_logger().info(f"[Host Node] Sending goal for {action_id}: {str(goal_msg)[:1000]}")
+        self.lab_logger().trace(f"[Host Node] Sending goal for {action_id}: {goal_msg}")
         action_client.wait_for_server()
 
         goal_uuid_obj = UUID(uuid=list(u.bytes))
@@ -1133,11 +1133,11 @@ class HostNode(BaseROS2DeviceNode):
 
         接收序列化的 ResourceTreeSet 数据并进行处理
         """
-        self.lab_logger().info(f"[Host Node-Resource] Resource tree add request received")
         try:
             # 解析请求数据
             data = json.loads(request.command)
             action = data["action"]
+            self.lab_logger().info(f"[Host Node-Resource] Resource tree {action} request received")
             data = data["data"]
             if action == "add":
                 await self._resource_tree_action_add_callback(data, response)
@@ -1243,7 +1243,7 @@ class HostNode(BaseROS2DeviceNode):
             data = json.loads(request.command)
             if "uuid" in data and data["uuid"] is not None:
                 http_req = http_client.resource_tree_get([data["uuid"]], data["with_children"])
-            elif "id" in data and data["id"].startswith("/"):
+            elif "id" in data:
                 http_req = http_client.resource_get(data["id"], data["with_children"])
             else:
                 raise ValueError("没有使用正确的物料 id 或 uuid")
@@ -1453,10 +1453,16 @@ class HostNode(BaseROS2DeviceNode):
         }
 
     def test_resource(
-        self, resource: ResourceSlot, resources: List[ResourceSlot], device: DeviceSlot, devices: List[DeviceSlot]
+        self, resource: ResourceSlot = None, resources:
List[ResourceSlot] = None, device: DeviceSlot = None, devices: List[DeviceSlot] = None ) -> TestResourceReturn: + if resources is None: + resources = [] + if devices is None: + devices = [] + if resource is None: + resource = RegularContainer("test_resource传入None") return { - "resources": ResourceTreeSet.from_plr_resources([resource, *resources]).dump(), + "resources": ResourceTreeSet.from_plr_resources([resource, *resources], known_newly_created=True).dump(), "devices": [device, *devices], } @@ -1508,7 +1514,7 @@ class HostNode(BaseROS2DeviceNode): # 构建服务地址 srv_address = f"/srv{namespace}/s2c_resource_tree" - self.lab_logger().info(f"[Host Node-Resource] Notifying {device_id} for resource tree {action} operation") + self.lab_logger().trace(f"[Host Node-Resource] Host -> {device_id} ResourceTree {action} operation started -------") # 创建服务客户端 sclient = self.create_client(SerialCommand, srv_address) @@ -1543,9 +1549,7 @@ class HostNode(BaseROS2DeviceNode): time.sleep(0.05) response = future.result() - self.lab_logger().info( - f"[Host Node-Resource] Resource tree {action} notification completed for {device_id}" - ) + self.lab_logger().trace(f"[Host Node-Resource] Host -> {device_id} ResourceTree {action} operation completed -------") return True except Exception as e: diff --git a/unilabos/test/experiments/reaction_station_bioyond.json b/unilabos/test/experiments/reaction_station_bioyond.json index 5cbe5b4..dc3d89f 100644 --- a/unilabos/test/experiments/reaction_station_bioyond.json +++ b/unilabos/test/experiments/reaction_station_bioyond.json @@ -14,7 +14,11 @@ ], "type": "device", "class": "reaction_station.bioyond", - "position": {"x": 0, "y": 3800, "z": 0}, + "position": { + "x": 0, + "y": 1100, + "z": 0 + }, "config": { "config": { "api_key": "DE9BDDA0", @@ -57,6 +61,10 @@ "BIOYOND_PolymerStation_TipBox": [ "枪头盒", "3a143890-9d51-60ac-6d6f-6edb43c12041" + ], + "BIOYOND_PolymerStation_Measurement_Vial": [ + "测量小瓶", + "b1fc79c9-5864-4f05-8052-6ed3abc18a97" ] } }, @@ -66,6 +74,9 @@ "_resource_type": "unilabos.resources.bioyond.decks:BIOYOND_PolymerReactionStation_Deck" } }, + "size_x": 2700.0, + "size_y": 1080.0, + "size_z": 2000.0, "protocol_type": [] }, "data": {} @@ -77,7 +88,11 @@ "parent": "reaction_station_bioyond", "type": "device", "class": "reaction_station.reactor", - "position": {"x": 1150, "y": 380, "z": 0}, + "position": { + "x": 1150, + "y": 380, + "z": 0 + }, "config": {}, "data": {} }, @@ -88,7 +103,11 @@ "parent": "reaction_station_bioyond", "type": "device", "class": "reaction_station.reactor", - "position": {"x": 1365, "y": 380, "z": 0}, + "position": { + "x": 1365, + "y": 380, + "z": 0 + }, "config": {}, "data": {} }, @@ -99,7 +118,11 @@ "parent": "reaction_station_bioyond", "type": "device", "class": "reaction_station.reactor", - "position": {"x": 1580, "y": 380, "z": 0}, + "position": { + "x": 1580, + "y": 380, + "z": 0 + }, "config": {}, "data": {} }, @@ -110,7 +133,11 @@ "parent": "reaction_station_bioyond", "type": "device", "class": "reaction_station.reactor", - "position": {"x": 1790, "y": 380, "z": 0}, + "position": { + "x": 1790, + "y": 380, + "z": 0 + }, "config": {}, "data": {} }, @@ -121,7 +148,11 @@ "parent": "reaction_station_bioyond", "type": "device", "class": "reaction_station.reactor", - "position": {"x": 2010, "y": 380, "z": 0}, + "position": { + "x": 2010, + "y": 380, + "z": 0 + }, "config": {}, "data": {} }, @@ -134,7 +165,7 @@ "class": "BIOYOND_PolymerReactionStation_Deck", "position": { "x": 0, - "y": 0, + "y": 1100, "z": 0 }, "config": {