Compare commits

..

2 Commits

Author | SHA1 | Message | Date
Xuwznln | 638bff5bab | Revert opcua client & move electrolyte node | 2026-01-17 16:49:52 +08:00
Xuwznln | 50a5086ba5 | Revert log change & update registry | 2026-01-17 16:48:54 +08:00
85 changed files with 1738 additions and 8179 deletions

View File

@@ -1,60 +0,0 @@
# unilabos: Production package (depends on unilabos-env + pip unilabos)
# For production deployment
package:
  name: unilabos
  version: 0.10.17
source:
  path: ../../unilabos
  target_directory: unilabos
build:
  python:
    entry_points:
      - unilab = unilabos.app.main:main
  script:
    - set PIP_NO_INDEX=
    - if: win
      then:
        - copy %RECIPE_DIR%\..\..\MANIFEST.in %SRC_DIR%
        - copy %RECIPE_DIR%\..\..\setup.cfg %SRC_DIR%
        - copy %RECIPE_DIR%\..\..\setup.py %SRC_DIR%
        - pip install %SRC_DIR%
    - if: unix
      then:
        - cp $RECIPE_DIR/../../MANIFEST.in $SRC_DIR
        - cp $RECIPE_DIR/../../setup.cfg $SRC_DIR
        - cp $RECIPE_DIR/../../setup.py $SRC_DIR
        - pip install $SRC_DIR
requirements:
  host:
    - python ==3.11.14
    - pip
    - setuptools
    - zstd
    - zstandard
  run:
    - zstd
    - zstandard
    - networkx
    - typing_extensions
    - websockets
    - pint
    - fastapi
    - jinja2
    - requests
    - uvicorn
    - opcua
    - pyserial
    - pandas
    - pymodbus
    - matplotlib
    - pylibftdi
    - uni-lab::unilabos-env ==0.10.17
about:
  repository: https://github.com/deepmodeling/Uni-Lab-OS
  license: GPL-3.0-only
  description: "UniLabOS - Production package with minimal ROS2 dependencies"

View File

@@ -1,39 +0,0 @@
# unilabos-env: conda environment dependencies (ROS2 + conda packages)
package:
  name: unilabos-env
  version: 0.10.17
build:
  noarch: generic
requirements:
  run:
    # Python
    - zstd
    - zstandard
    - conda-forge::python ==3.11.14
    - conda-forge::opencv
    # ROS2 dependencies (from ci-check.yml)
    - robostack-staging::ros-humble-ros-core
    - robostack-staging::ros-humble-action-msgs
    - robostack-staging::ros-humble-std-msgs
    - robostack-staging::ros-humble-geometry-msgs
    - robostack-staging::ros-humble-control-msgs
    - robostack-staging::ros-humble-nav2-msgs
    - robostack-staging::ros-humble-cv-bridge
    - robostack-staging::ros-humble-vision-opencv
    - robostack-staging::ros-humble-tf-transformations
    - robostack-staging::ros-humble-moveit-msgs
    - robostack-staging::ros-humble-tf2-ros
    - robostack-staging::ros-humble-tf2-ros-py
    - conda-forge::transforms3d
    - conda-forge::uv
    # UniLabOS custom messages
    - uni-lab::ros-humble-unilabos-msgs
about:
  repository: https://github.com/deepmodeling/Uni-Lab-OS
  license: GPL-3.0-only
  description: "UniLabOS Environment - ROS2 and conda dependencies"

View File

@@ -1,42 +0,0 @@
# unilabos-full: Full package with all features
# Depends on unilabos + complete ROS2 desktop + dev tools
package:
  name: unilabos-full
  version: 0.10.17
build:
  noarch: generic
requirements:
  run:
    # Base unilabos package (includes unilabos-env)
    - uni-lab::unilabos ==0.10.17
    # Documentation tools
    - sphinx
    - sphinx_rtd_theme
    # Web UI
    - gradio
    - flask
    # Interactive development
    - ipython
    - jupyter
    - jupyros
    - colcon-common-extensions
    # ROS2 full desktop (includes rviz2, gazebo, etc.)
    - robostack-staging::ros-humble-desktop-full
    # Navigation and motion control
    - ros-humble-navigation2
    - ros-humble-ros2-control
    - ros-humble-robot-state-publisher
    - ros-humble-joint-state-publisher
    # MoveIt motion planning
    - ros-humble-moveit
    - ros-humble-moveit-servo
    # Simulation
    - ros-humble-simulation
about:
  repository: https://github.com/deepmodeling/Uni-Lab-OS
  license: GPL-3.0-only
  description: "UniLabOS Full - Complete package with ROS2 Desktop, MoveIt, Navigation2, Gazebo, Jupyter"

.conda/recipe.yaml (new file, 91 lines)
View File

@@ -0,0 +1,91 @@
package:
  name: unilabos
  version: 0.10.15
source:
  path: ../unilabos
  target_directory: unilabos
build:
  python:
    entry_points:
      - unilab = unilabos.app.main:main
  script:
    - set PIP_NO_INDEX=
    - if: win
      then:
        - copy %RECIPE_DIR%\..\MANIFEST.in %SRC_DIR%
        - copy %RECIPE_DIR%\..\setup.cfg %SRC_DIR%
        - copy %RECIPE_DIR%\..\setup.py %SRC_DIR%
        - call %PYTHON% -m pip install %SRC_DIR%
    - if: unix
      then:
        - cp $RECIPE_DIR/../MANIFEST.in $SRC_DIR
        - cp $RECIPE_DIR/../setup.cfg $SRC_DIR
        - cp $RECIPE_DIR/../setup.py $SRC_DIR
        - $PYTHON -m pip install $SRC_DIR
requirements:
  host:
    - python ==3.11.11
    - pip
    - setuptools
    - zstd
    - zstandard
  run:
    - conda-forge::python ==3.11.11
    - compilers
    - cmake
    - zstd
    - zstandard
    - ninja
    - if: unix
      then:
        - make
    - sphinx
    - sphinx_rtd_theme
    - numpy
    - scipy
    - pandas
    - networkx
    - matplotlib
    - pint
    - pyserial
    - pyusb
    - pylibftdi
    - pymodbus
    - python-can
    - pyvisa
    - opencv
    - pydantic
    - fastapi
    - uvicorn
    - gradio
    - flask
    - websockets
    - ipython
    - jupyter
    - jupyros
    - colcon-common-extensions
    - robostack-staging::ros-humble-desktop-full
    - robostack-staging::ros-humble-control-msgs
    - robostack-staging::ros-humble-sensor-msgs
    - robostack-staging::ros-humble-trajectory-msgs
    - ros-humble-navigation2
    - ros-humble-ros2-control
    - ros-humble-robot-state-publisher
    - ros-humble-joint-state-publisher
    - ros-humble-rosbridge-server
    - ros-humble-cv-bridge
    - ros-humble-tf2
    - ros-humble-moveit
    - ros-humble-moveit-servo
    - ros-humble-simulation
    - ros-humble-tf-transformations
    - transforms3d
    - uni-lab::ros-humble-unilabos-msgs
about:
  repository: https://github.com/deepmodeling/Uni-Lab-OS
  license: GPL-3.0-only
  description: "Uni-Lab-OS"

View File

@@ -0,0 +1,9 @@
@echo off
setlocal enabledelayedexpansion
REM upgrade pip
"%PREFIX%\python.exe" -m pip install --upgrade pip
REM install extra deps
"%PREFIX%\python.exe" -m pip install paho-mqtt opentrons_shared_data
"%PREFIX%\python.exe" -m pip install git+https://github.com/Xuwznln/pylabrobot.git

View File

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
set -euxo pipefail
# make sure pip is available
"$PREFIX/bin/python" -m pip install --upgrade pip
# install extra deps
"$PREFIX/bin/python" -m pip install paho-mqtt opentrons_shared_data
"$PREFIX/bin/python" -m pip install git+https://github.com/Xuwznln/pylabrobot.git

View File

@@ -1,328 +0,0 @@
---
description: 设备驱动开发规范
globs: ["unilabos/devices/**/*.py"]
---
# 设备驱动开发规范
## 目录结构
```
unilabos/devices/
├── virtual/ # 虚拟设备(用于测试)
│ ├── virtual_stirrer.py
│ └── virtual_centrifuge.py
├── liquid_handling/ # 液体处理设备
├── balance/ # 天平设备
├── hplc/ # HPLC设备
├── pump_and_valve/ # 泵和阀门
├── temperature/ # 温度控制设备
├── workstation/ # 工作站(组合设备)
└── ...
```
## 设备类完整模板
```python
import asyncio
import logging
import time as time_module
from typing import Dict, Any, Optional
from unilabos.ros.nodes.base_device_node import BaseROS2DeviceNode
class MyDevice:
"""
设备类描述
Attributes:
device_id: 设备唯一标识
config: 设备配置字典
data: 设备状态数据
"""
_ros_node: BaseROS2DeviceNode
def __init__(
self,
device_id: str = None,
config: Dict[str, Any] = None,
**kwargs
):
"""
初始化设备
Args:
device_id: 设备ID
config: 配置字典
**kwargs: 其他参数
"""
# 兼容不同调用方式
if device_id is None and 'id' in kwargs:
device_id = kwargs.pop('id')
if config is None and 'config' in kwargs:
config = kwargs.pop('config')
self.device_id = device_id or "unknown_device"
self.config = config or {}
self.data = {}
# 从config读取参数
self.port = self.config.get('port') or kwargs.get('port', 'COM1')
self._max_value = self.config.get('max_value', 1000.0)
# 初始化日志
self.logger = logging.getLogger(f"MyDevice.{self.device_id}")
self.logger.info(f"设备 {self.device_id} 已创建")
def post_init(self, ros_node: BaseROS2DeviceNode):
"""
ROS节点注入 - 在ROS节点创建后调用
Args:
ros_node: ROS2设备节点实例
"""
self._ros_node = ros_node
async def initialize(self) -> bool:
"""
初始化设备 - 连接硬件、设置初始状态
Returns:
bool: 初始化是否成功
"""
self.logger.info(f"初始化设备 {self.device_id}")
try:
# 执行硬件初始化
# await self._connect_hardware()
# 设置初始状态
self.data.update({
"status": "待机",
"is_running": False,
"current_value": 0.0,
})
self.logger.info(f"设备 {self.device_id} 初始化完成")
return True
except Exception as e:
self.logger.error(f"初始化失败: {e}")
self.data["status"] = f"错误: {e}"
return False
async def cleanup(self) -> bool:
"""
清理设备 - 断开连接、释放资源
Returns:
bool: 清理是否成功
"""
self.logger.info(f"清理设备 {self.device_id}")
self.data.update({
"status": "离线",
"is_running": False,
})
return True
# ==================== 设备动作 ====================
async def execute_action(
self,
param1: float,
param2: str = "",
**kwargs
) -> bool:
"""
执行设备动作
Args:
param1: 参数1
param2: 参数2可选
Returns:
bool: 动作是否成功
"""
# 类型转换和验证
try:
param1 = float(param1)
except (ValueError, TypeError) as e:
self.logger.error(f"参数类型错误: {e}")
return False
# 参数验证
if param1 > self._max_value:
self.logger.error(f"参数超出范围: {param1} > {self._max_value}")
return False
self.logger.info(f"执行动作: param1={param1}, param2={param2}")
# 更新状态
self.data.update({
"status": "运行中",
"is_running": True,
})
# 执行动作(带进度反馈)
duration = 10.0 # 秒
start_time = time_module.time()
while True:
elapsed = time_module.time() - start_time
remaining = max(0, duration - elapsed)
progress = min(100, (elapsed / duration) * 100)
self.data.update({
"status": f"运行中: {progress:.0f}%",
"remaining_time": remaining,
})
if remaining <= 0:
break
await self._ros_node.sleep(1.0)
# 完成
self.data.update({
"status": "完成",
"is_running": False,
})
self.logger.info("动作执行完成")
return True
# ==================== 状态属性 ====================
@property
def status(self) -> str:
"""设备状态 - 自动发布为ROS Topic"""
return self.data.get("status", "未知")
@property
def is_running(self) -> bool:
"""是否正在运行"""
return self.data.get("is_running", False)
@property
def current_value(self) -> float:
"""当前值"""
return self.data.get("current_value", 0.0)
# ==================== 辅助方法 ====================
def get_device_info(self) -> Dict[str, Any]:
"""获取设备信息"""
return {
"device_id": self.device_id,
"status": self.status,
"is_running": self.is_running,
"current_value": self.current_value,
}
def __str__(self) -> str:
return f"MyDevice({self.device_id}: {self.status})"
```
## 关键规则
### 1. 参数处理
所有动作方法的参数都可能以字符串形式传入,必须进行类型转换:
```python
async def my_action(self, value: float, **kwargs) -> bool:
# 始终进行类型转换
try:
value = float(value)
except (ValueError, TypeError) as e:
self.logger.error(f"参数类型错误: {e}")
return False
```
### 2. vessel 参数处理
vessel 参数可能是字符串ID或字典
```python
def extract_vessel_id(vessel: Union[str, dict]) -> str:
if isinstance(vessel, dict):
return vessel.get("id", "")
return str(vessel) if vessel else ""
```
### 3. 状态更新
使用 `self.data` 字典存储状态,属性读取状态:
```python
# 更新状态
self.data["status"] = "运行中"
self.data["current_speed"] = 300.0
# 读取状态(通过属性)
@property
def status(self) -> str:
return self.data.get("status", "待机")
```
### 4. 异步等待
使用 ROS 节点的 sleep 方法:
```python
# 正确
await self._ros_node.sleep(1.0)
# 避免(除非在纯 Python 测试环境)
await asyncio.sleep(1.0)
```
### 5. 进度反馈
长时间运行的操作需要提供进度反馈:
```python
while remaining > 0:
progress = (elapsed / total_time) * 100
self.data["status"] = f"运行中: {progress:.0f}%"
self.data["remaining_time"] = remaining
await self._ros_node.sleep(1.0)
```
## Virtual Devices
Virtual devices are used for testing and demos and live under `unilabos/devices/virtual/`:
- Class names start with `Virtual`
- File names start with `virtual_`
- They simulate the behaviour and timing of the real device
- Emoji may be used to make logs easier to read (optional)
A minimal sketch following these conventions is shown below.
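For illustration, a minimal sketch that follows these conventions. The class, file name and parameters are hypothetical (this device does not ship with Uni-Lab-OS); it reuses the template patterns above: config-driven `__init__`, `post_init` injection, and async actions that update `self.data` with progress.
```python
# unilabos/devices/virtual/virtual_heater.py  (hypothetical example)
import logging
import time as time_module
from typing import Any, Dict


class VirtualHeater:
    """Simulated heater for tests and demos."""

    def __init__(self, device_id: str = None, config: Dict[str, Any] = None, **kwargs):
        self.device_id = device_id or "virtual_heater_1"
        self.config = config or {}
        self.data: Dict[str, Any] = {}
        self._max_temp = float(self.config.get("max_temp", 200.0))
        self.logger = logging.getLogger(f"VirtualHeater.{self.device_id}")

    def post_init(self, ros_node):
        # ROS node injected after node creation, as in the template above
        self._ros_node = ros_node

    async def heat(self, target_temp: float, duration: float = 10.0, **kwargs) -> bool:
        target_temp = float(target_temp)  # parameters may arrive as strings
        duration = float(duration)
        if target_temp > self._max_temp:
            self.logger.error(f"target_temp {target_temp} exceeds {self._max_temp}")
            return False
        start = time_module.time()
        while (elapsed := time_module.time() - start) < duration:
            progress = min(100, elapsed / duration * 100)
            self.data.update({"status": f"加热中: {progress:.0f}%", "is_running": True})
            await self._ros_node.sleep(1.0)  # simulate real-device timing
        self.data.update({"status": "完成", "is_running": False})
        return True

    @property
    def status(self) -> str:
        return self.data.get("status", "待机")
```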
## 工作站设备
工作站是组合多个设备的复杂设备:
```python
from unilabos.devices.workstation.workstation_base import WorkstationBase
class MyWorkstation(WorkstationBase):
"""组合工作站"""
async def execute_workflow(self, workflow: Dict[str, Any]) -> bool:
"""执行工作流"""
pass
```
## Device Registration
After the device class is implemented, register it in the registry:
1. Create or edit `unilabos/registry/devices/my_category.yaml`
2. Add the device entry (see `virtual_device.yaml` for reference)
3. Run `--complete_registry` to auto-generate the schema
A sketch of such an entry follows.
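As a sketch, a registry entry for the hypothetical `VirtualHeater` above might look like the following; the field layout follows the registry format documented in the registry rules, and the concrete values are placeholders:
```yaml
# unilabos/registry/devices/virtual_device.yaml (illustrative entry)
virtual_heater:
  category:
    - virtual_device
  description: "Virtual heater (example sketch)"
  version: "1.0.0"
  class:
    module: "unilabos.devices.virtual.virtual_heater:VirtualHeater"
    type: python
  status_types:
    status: String
    is_running: Bool
  # auto-* actions and their schemas are filled in by `--complete_registry`
  action_value_mappings: {}
```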

View File

@@ -1,240 +0,0 @@
---
description: 协议编译器开发规范
globs: ["unilabos/compile/**/*.py"]
---
# 协议编译器开发规范
## 概述
协议编译器负责将高级实验操作(如 Stir、Add、Filter)编译为设备可执行的动作序列。
## 文件命名
- 位置: `unilabos/compile/`
- 命名: `{operation}_protocol.py`
- 示例: `stir_protocol.py`, `add_protocol.py`, `filter_protocol.py`
## 协议函数模板
```python
from typing import List, Dict, Any, Union
import networkx as nx
import logging
from .utils.unit_parser import parse_time_input
from .utils.vessel_parser import extract_vessel_id
logger = logging.getLogger(__name__)
def generate_{operation}_protocol(
G: nx.DiGraph,
vessel: Union[str, dict],
param1: Union[str, float] = "0",
param2: float = 0.0,
**kwargs
) -> List[Dict[str, Any]]:
"""
生成{操作}协议序列
Args:
G: 物理拓扑图 (NetworkX DiGraph)
vessel: 容器ID或Resource字典
param1: 参数1支持字符串单位如 "5 min"
param2: 参数2
**kwargs: 其他参数
Returns:
List[Dict]: 动作序列
Raises:
ValueError: 参数无效时
"""
# 1. 提取 vessel_id
vessel_id = extract_vessel_id(vessel)
# 2. 验证参数
if not vessel_id:
raise ValueError("vessel 参数不能为空")
if vessel_id not in G.nodes():
raise ValueError(f"容器 '{vessel_id}' 不存在于系统中")
# 3. 解析参数(支持单位)
parsed_param1 = parse_time_input(param1) # "5 min" -> 300.0
# 4. 查找设备
device_id = find_connected_device(G, vessel_id, device_type="my_device")
# 5. 生成动作序列
action_sequence = []
action = {
"device_id": device_id,
"action_name": "my_action",
"action_kwargs": {
"vessel": {"id": vessel_id}, # 始终使用字典格式
"param1": float(parsed_param1),
"param2": float(param2),
}
}
action_sequence.append(action)
logger.info(f"生成协议: {len(action_sequence)} 个动作")
return action_sequence
def find_connected_device(
G: nx.DiGraph,
vessel_id: str,
device_type: str = ""
) -> str:
"""
查找与容器相连的设备
Args:
G: 拓扑图
vessel_id: 容器ID
device_type: 设备类型关键字
Returns:
str: 设备ID
"""
# 查找所有匹配类型的设备
device_nodes = []
for node in G.nodes():
node_class = G.nodes[node].get('class', '') or ''
if device_type.lower() in node_class.lower():
device_nodes.append(node)
# 检查连接
if vessel_id and device_nodes:
for device in device_nodes:
if G.has_edge(device, vessel_id) or G.has_edge(vessel_id, device):
return device
# 返回第一个可用设备
if device_nodes:
return device_nodes[0]
# 默认设备
return f"{device_type}_1"
```
## 关键规则
### 1. vessel 参数处理
vessel 参数可能是字符串或字典,需要统一处理:
```python
def extract_vessel_id(vessel: Union[str, dict]) -> str:
"""提取vessel_id"""
if isinstance(vessel, dict):
# 可能是 {"id": "xxx"} 或完整 Resource 对象
return vessel.get("id", list(vessel.values())[0].get("id", ""))
return str(vessel) if vessel else ""
```
### 2. action_kwargs 中的 vessel
始终使用 `{"id": vessel_id}` 格式传递 vessel
```python
# 正确
"action_kwargs": {
"vessel": {"id": vessel_id}, # 字符串ID包装为字典
}
# 避免
"action_kwargs": {
"vessel": vessel_resource, # 不要传递完整 Resource 对象
}
```
### 3. 单位解析
使用 `parse_time_input` 解析时间参数:
```python
from .utils.unit_parser import parse_time_input
# 支持格式: "5 min", "1 h", "300", "1.5 hours"
time_seconds = parse_time_input("5 min") # -> 300.0
time_seconds = parse_time_input(120) # -> 120.0
time_seconds = parse_time_input("1 h") # -> 3600.0
```
### 4. 参数验证
所有参数必须进行验证和类型转换:
```python
# 验证范围
if speed < 10.0 or speed > 1500.0:
logger.warning(f"速度 {speed} 超出范围,修正为 300")
speed = 300.0
# 类型转换
param = float(param) if not isinstance(param, (int, float)) else param
```
### 5. 日志记录
使用项目日志记录器:
```python
logger = logging.getLogger(__name__)
def generate_protocol(...):
logger.info(f"开始生成协议...")
logger.debug(f"参数: vessel={vessel_id}, time={time}")
logger.warning(f"参数修正: {old_value} -> {new_value}")
```
## 便捷函数
为常用操作提供便捷函数:
```python
def stir_briefly(G: nx.DiGraph, vessel: Union[str, dict],
speed: float = 300.0) -> List[Dict[str, Any]]:
"""短时间搅拌30秒"""
return generate_stir_protocol(G, vessel, time="30", stir_speed=speed)
def stir_vigorously(G: nx.DiGraph, vessel: Union[str, dict],
time: str = "5 min") -> List[Dict[str, Any]]:
"""剧烈搅拌"""
return generate_stir_protocol(G, vessel, time=time, stir_speed=800.0)
```
## 测试函数
每个协议文件应包含测试函数:
```python
def test_{operation}_protocol():
"""测试协议生成"""
# 测试参数处理
vessel_dict = {"id": "flask_1", "name": "反应瓶1"}
vessel_id = extract_vessel_id(vessel_dict)
assert vessel_id == "flask_1"
# 测试单位解析
time_s = parse_time_input("5 min")
assert time_s == 300.0
if __name__ == "__main__":
test_{operation}_protocol()
```
## 现有协议参考
- `stir_protocol.py` - 搅拌操作
- `add_protocol.py` - 添加物料
- `filter_protocol.py` - 过滤操作
- `heatchill_protocol.py` - 加热/冷却
- `separate_protocol.py` - 分离操作
- `evaporate_protocol.py` - 蒸发操作

View File

@@ -1,319 +0,0 @@
---
description: 注册表配置规范 (YAML)
globs: ["unilabos/registry/**/*.yaml"]
---
# 注册表配置规范
## 概述
注册表使用 YAML 格式定义设备和资源类型,是 Uni-Lab-OS 的核心配置系统。
## 目录结构
```
unilabos/registry/
├── devices/ # 设备类型注册
│ ├── virtual_device.yaml
│ ├── liquid_handler.yaml
│ └── ...
├── device_comms/ # 通信设备配置
│ ├── communication_devices.yaml
│ └── modbus_ioboard.yaml
└── resources/ # 资源类型注册
├── bioyond/
├── organic/
├── opentrons/
└── ...
```
## 设备注册表格式
### 基本结构
```yaml
device_type_id:
# 基本信息
description: "设备描述"
version: "1.0.0"
category:
- category_name
icon: "icon_device.webp"
# 类配置
class:
module: "unilabos.devices.my_module:MyClass"
type: python
# 状态类型(属性 -> ROS消息类型)
status_types:
status: String
temperature: Float64
is_running: Bool
# 动作映射
action_value_mappings:
action_name:
type: UniLabJsonCommand # 或 UniLabJsonCommandAsync
goal: {}
feedback: {}
result: {}
schema: {...}
handles: {}
```
### action_value_mappings 详细格式
```yaml
action_value_mappings:
# 同步动作
my_sync_action:
type: UniLabJsonCommand
goal:
param1: param1
param2: param2
feedback: {}
result:
success: success
message: message
goal_default:
param1: 0.0
param2: ""
handles: {}
placeholder_keys:
device_param: unilabos_devices # 设备选择器
resource_param: unilabos_resources # 资源选择器
schema:
title: "动作名称参数"
description: "动作描述"
type: object
properties:
goal:
type: object
properties:
param1:
type: number
param2:
type: string
required:
- param1
feedback: {}
result:
type: object
properties:
success:
type: boolean
message:
type: string
required:
- goal
# 异步动作
my_async_action:
type: UniLabJsonCommandAsync
goal: {}
feedback:
progress: progress
current_status: status
result:
success: success
schema: {...}
```
### 自动生成的动作
以 `auto-` 开头的动作由系统自动生成:
```yaml
action_value_mappings:
auto-initialize:
type: UniLabJsonCommandAsync
goal: {}
feedback: {}
result: {}
schema: {...}
auto-cleanup:
type: UniLabJsonCommandAsync
goal: {}
feedback: {}
result: {}
schema: {...}
```
### handles 配置
用于工作流编辑器中的数据流连接:
```yaml
handles:
input:
- handler_key: "input_resource"
data_type: "resource"
label: "输入资源"
data_source: "handle"
data_key: "resources"
output:
- handler_key: "output_labware"
data_type: "resource"
label: "输出器皿"
data_source: "executor"
data_key: "created_resource.@flatten"
```
## 资源注册表格式
```yaml
resource_type_id:
description: "资源描述"
version: "1.0.0"
category:
- category_name
icon: ""
handles: []
init_param_schema: {}
class:
module: "unilabos.resources.my_module:MyResource"
type: pylabrobot # 或 python
```
### PyLabRobot 资源示例
```yaml
BIOYOND_Electrolyte_6VialCarrier:
category:
- bottle_carriers
- bioyond
class:
module: "unilabos.resources.bioyond.bottle_carriers:BIOYOND_Electrolyte_6VialCarrier"
type: pylabrobot
version: "1.0.0"
```
## Status Type Mapping
Mapping from Python types to ROS message types (a dict sketch follows the table):
| Python type | ROS message type |
|------------|-------------|
| `str` | `String` |
| `bool` | `Bool` |
| `int` | `Int64` |
| `float` | `Float64` |
| `list` | `String` (序列化) |
| `dict` | `String` (序列化) |
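As an illustration of the table only (this is not the converter unilabos uses internally), the mapping can be written as a dict over the standard `std_msgs` types, with `list`/`dict` values serialized into a `String`:
```python
import json
from std_msgs.msg import Bool, Float64, Int64, String

STATUS_TYPE_MAP = {
    str: String,
    bool: Bool,
    int: Int64,
    float: Float64,
    list: String,   # serialized
    dict: String,   # serialized
}

def to_status_msg(value):
    """Wrap a Python value in the ROS message type prescribed by the table."""
    msg_type = STATUS_TYPE_MAP[type(value)]
    if isinstance(value, (list, dict)):
        value = json.dumps(value)
    return msg_type(data=value)
```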
## 自动完善注册表
使用 `--complete_registry` 参数自动生成 schema
```bash
python -m unilabos.app.main --complete_registry
```
这会:
1. 扫描设备类的方法签名
2. 自动生成 `auto-` 前缀的动作
3. 生成 JSON Schema
4. 更新 YAML 文件
## Validation Rules
1. **device_type_id** must be unique
2. The **module** path must be correct and importable
3. Types in **status_types** must be valid ROS message types
4. **schema** must be a valid JSON Schema
A quick local check for rule 2 is sketched below.
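Rule 2 can be checked locally with a small standalone script; this helper is hypothetical and not part of unilabos:
```python
# Check that every class.module entry ("package.module:ClassName") is importable
import importlib
import yaml

def check_registry_modules(registry_yaml: str) -> None:
    with open(registry_yaml, "r", encoding="utf-8") as f:
        registry = yaml.safe_load(f) or {}
    for device_type_id, entry in registry.items():
        target = entry.get("class", {}).get("module", "")
        module_path, _, class_name = target.partition(":")
        try:
            cls = getattr(importlib.import_module(module_path), class_name)
        except (ImportError, AttributeError) as exc:
            print(f"[FAIL] {device_type_id}: {target} ({exc})")
        else:
            print(f"[OK]   {device_type_id}: {cls.__name__}")

# check_registry_modules("unilabos/registry/devices/virtual_device.yaml")
```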
## 示例:完整设备配置
```yaml
virtual_stirrer:
category:
- virtual_device
description: "虚拟搅拌器设备"
version: "1.0.0"
icon: "icon_stirrer.webp"
handles: []
init_param_schema: {}
class:
module: "unilabos.devices.virtual.virtual_stirrer:VirtualStirrer"
type: python
status_types:
status: String
operation_mode: String
current_speed: Float64
is_stirring: Bool
remaining_time: Float64
action_value_mappings:
auto-initialize:
type: UniLabJsonCommandAsync
goal: {}
feedback: {}
result: {}
schema:
title: "initialize参数"
type: object
properties:
goal:
type: object
properties: {}
feedback: {}
result: {}
required:
- goal
stir:
type: UniLabJsonCommandAsync
goal:
stir_time: stir_time
stir_speed: stir_speed
settling_time: settling_time
feedback:
current_speed: current_speed
remaining_time: remaining_time
result:
success: success
goal_default:
stir_time: 60.0
stir_speed: 300.0
settling_time: 30.0
handles: {}
schema:
title: "stir参数"
description: "搅拌操作"
type: object
properties:
goal:
type: object
properties:
stir_time:
type: number
description: "搅拌时间(秒)"
stir_speed:
type: number
description: "搅拌速度RPM"
settling_time:
type: number
description: "沉降时间(秒)"
required:
- stir_time
- stir_speed
feedback:
type: object
properties:
current_speed:
type: number
remaining_time:
type: number
result:
type: object
properties:
success:
type: boolean
required:
- goal
```

View File

@@ -1,233 +0,0 @@
---
description: ROS 2 集成开发规范
globs: ["unilabos/ros/**/*.py", "**/*_node.py"]
---
# ROS 2 集成开发规范
## 概述
Uni-Lab-OS 使用 ROS 2 作为设备通信中间件,基于 rclpy 实现。
## 核心组件
### BaseROS2DeviceNode
设备节点基类,提供:
- ROS Topic 自动发布(状态属性)
- Action Server 自动创建(设备动作)
- 资源管理服务
- 异步任务调度
```python
from unilabos.ros.nodes.base_device_node import BaseROS2DeviceNode
```
### 消息转换器
```python
from unilabos.ros.msgs.message_converter import (
convert_to_ros_msg,
convert_from_ros_msg_with_mapping,
msg_converter_manager,
ros_action_to_json_schema,
ros_message_to_json_schema,
)
```
## 设备与 ROS 集成
### post_init 方法
设备类必须实现 `post_init` 方法接收 ROS 节点:
```python
class MyDevice:
_ros_node: BaseROS2DeviceNode
def post_init(self, ros_node: BaseROS2DeviceNode):
"""ROS节点注入"""
self._ros_node = ros_node
```
### 状态属性发布
设备的 `@property` 属性会自动发布为 ROS Topic
```python
class MyDevice:
@property
def temperature(self) -> float:
return self._temperature
# 自动发布到 /{namespace}/temperature Topic
```
### Topic 配置装饰器
```python
from unilabos.utils.decorator import topic_config
class MyDevice:
@property
@topic_config(period=1.0, print_publish=False, qos=10)
def fast_data(self) -> float:
"""高频数据 - 每秒发布一次"""
return self._fast_data
@property
@topic_config(period=5.0)
def slow_data(self) -> str:
"""低频数据 - 每5秒发布一次"""
return self._slow_data
```
### 订阅装饰器
```python
from unilabos.utils.decorator import subscribe
class MyDevice:
@subscribe(topic="/external/sensor_data", qos=10)
def on_sensor_data(self, msg):
"""订阅外部Topic"""
self._sensor_value = msg.data
```
## 异步操作
### 使用 ROS 节点睡眠
```python
# 推荐使用ROS节点的睡眠方法
await self._ros_node.sleep(1.0)
# 不推荐直接使用asyncio(可能导致回调阻塞)
await asyncio.sleep(1.0)
```
### 获取事件循环
```python
from unilabos.ros.x.rclpyx import get_event_loop
loop = get_event_loop()
```
## 消息类型
### unilabos_msgs 包
```python
from unilabos_msgs.msg import Resource
from unilabos_msgs.srv import (
ResourceAdd,
ResourceDelete,
ResourceUpdate,
ResourceList,
SerialCommand,
)
from unilabos_msgs.action import SendCmd
```
### Resource 消息结构
```python
Resource:
id: str
name: str
category: str
type: str
parent: str
children: List[str]
config: str # JSON字符串
data: str # JSON字符串
sample_id: str
pose: Pose
```
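A minimal sketch of filling these fields from Python, assuming the field list above; note that `config` and `data` carry JSON strings:
```python
import json
from unilabos_msgs.msg import Resource

res = Resource()
res.id = "flask_1"
res.name = "Reaction flask 1"
res.category = "container"
res.type = "flask"
res.parent = "workstation_1"
res.children = []
res.config = json.dumps({"volume_ml": 250})   # config: JSON string
res.data = json.dumps({"status": "empty"})    # data: JSON string
res.sample_id = ""
# res.pose keeps its default geometry_msgs Pose
```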
## 日志适配器
```python
from unilabos.utils.log import info, debug, warning, error, trace
class MyDevice:
def __init__(self):
# 创建设备专属日志器
self.logger = logging.getLogger(f"MyDevice.{self.device_id}")
```
ROSLoggerAdapter 同时向自定义日志和 ROS 日志发送消息。
## Action Server
设备动作自动创建为 ROS Action Server
```yaml
# 在注册表中配置
action_value_mappings:
my_action:
type: UniLabJsonCommandAsync # 异步Action
goal: {...}
feedback: {...}
result: {...}
```
### Action 类型
- **UniLabJsonCommand**: 同步动作
- **UniLabJsonCommandAsync**: 异步动作(支持feedback)
## 服务客户端
```python
from rclpy.client import Client
# 调用其他节点的服务
response = await self._ros_node.call_service(
service_name="/other_node/service",
request=MyServiceRequest(...)
)
```
## 命名空间
设备节点使用命名空间隔离:
```
/{device_id}/ # 设备命名空间
/{device_id}/status # 状态Topic
/{device_id}/temperature # 温度Topic
/{device_id}/my_action # 动作Server
```
## 调试
### 查看 Topic
```bash
ros2 topic list
ros2 topic echo /{device_id}/status
```
### 查看 Action
```bash
ros2 action list
ros2 action info /{device_id}/my_action
```
### 查看 Service
```bash
ros2 service list
ros2 service call /{device_id}/resource_list unilabos_msgs/srv/ResourceList
```
## 最佳实践
1. **状态属性命名**: 使用蛇形命名法(snake_case)
2. **Topic 频率**: 根据数据变化频率调整,避免过高频率
3. **Action 反馈**: 长时间操作提供进度反馈
4. **错误处理**: 使用 try-except 捕获并记录错误
5. **资源清理**: 在 cleanup 方法中正确清理资源

View File

@@ -1,357 +0,0 @@
---
description: 测试开发规范
globs: ["tests/**/*.py", "**/test_*.py"]
---
# 测试开发规范
## 目录结构
```
tests/
├── __init__.py
├── devices/ # 设备测试
│ └── liquid_handling/
│ └── test_transfer_liquid.py
├── resources/ # 资源测试
│ ├── test_bottle_carrier.py
│ └── test_resourcetreeset.py
├── ros/ # ROS消息测试
│ └── msgs/
│ ├── test_basic.py
│ ├── test_conversion.py
│ └── test_mapping.py
└── workflow/ # 工作流测试
└── merge_workflow.py
```
## 测试框架
使用 pytest 作为测试框架:
```bash
# 运行所有测试
pytest tests/
# 运行特定测试文件
pytest tests/resources/test_bottle_carrier.py
# 运行特定测试函数
pytest tests/resources/test_bottle_carrier.py::test_bottle_carrier
# 显示详细输出
pytest -v tests/
# 显示打印输出
pytest -s tests/
```
## 测试文件模板
```python
import pytest
from typing import List, Dict, Any
# 导入被测试的模块
from unilabos.resources.bioyond.bottle_carriers import (
BIOYOND_Electrolyte_6VialCarrier,
)
from unilabos.resources.bioyond.bottles import (
BIOYOND_PolymerStation_Solid_Vial,
)
class TestBottleCarrier:
"""BottleCarrier 测试类"""
def setup_method(self):
"""每个测试方法前执行"""
self.carrier = BIOYOND_Electrolyte_6VialCarrier("test_carrier")
def teardown_method(self):
"""每个测试方法后执行"""
pass
def test_carrier_creation(self):
"""测试载架创建"""
assert self.carrier.name == "test_carrier"
assert len(self.carrier.sites) == 6
def test_bottle_placement(self):
"""测试瓶子放置"""
bottle = BIOYOND_PolymerStation_Solid_Vial("test_bottle")
# 测试逻辑...
assert bottle.name == "test_bottle"
def test_standalone_function():
"""独立测试函数"""
result = some_function()
assert result is True
# 参数化测试
@pytest.mark.parametrize("input,expected", [
("5 min", 300.0),
("1 h", 3600.0),
("120", 120.0),
(60, 60.0),
])
def test_time_parsing(input, expected):
"""测试时间解析"""
from unilabos.compile.utils.unit_parser import parse_time_input
assert parse_time_input(input) == expected
# 异常测试
def test_invalid_input_raises_error():
"""测试无效输入抛出异常"""
with pytest.raises(ValueError) as exc_info:
invalid_function("bad_input")
assert "invalid" in str(exc_info.value).lower()
# 跳过条件测试
@pytest.mark.skipif(
not os.environ.get("ROS_DISTRO"),
reason="需要ROS环境"
)
def test_ros_feature():
"""需要ROS环境的测试"""
pass
```
## 设备测试
### 虚拟设备测试
```python
import pytest
import asyncio
from unittest.mock import MagicMock, AsyncMock
from unilabos.devices.virtual.virtual_stirrer import VirtualStirrer
class TestVirtualStirrer:
"""VirtualStirrer 测试"""
@pytest.fixture
def stirrer(self):
"""创建测试用搅拌器"""
device = VirtualStirrer(
device_id="test_stirrer",
config={"max_speed": 1500.0, "min_speed": 50.0}
)
# Mock ROS节点
mock_node = MagicMock()
mock_node.sleep = AsyncMock(return_value=None)
device.post_init(mock_node)
return device
@pytest.mark.asyncio
async def test_initialize(self, stirrer):
"""测试初始化"""
result = await stirrer.initialize()
assert result is True
assert stirrer.status == "待机中"
@pytest.mark.asyncio
async def test_stir_action(self, stirrer):
"""测试搅拌动作"""
await stirrer.initialize()
result = await stirrer.stir(
stir_time=5.0,
stir_speed=300.0,
settling_time=2.0
)
assert result is True
assert stirrer.operation_mode == "Completed"
@pytest.mark.asyncio
async def test_stir_invalid_speed(self, stirrer):
"""测试无效速度"""
await stirrer.initialize()
# 速度超出范围
result = await stirrer.stir(
stir_time=5.0,
stir_speed=2000.0, # 超过max_speed
settling_time=0.0
)
assert result is False
assert "错误" in stirrer.status
```
### 异步测试配置
```python
# conftest.py
import pytest
import asyncio
@pytest.fixture(scope="session")
def event_loop():
"""创建事件循环"""
loop = asyncio.get_event_loop_policy().new_event_loop()
yield loop
loop.close()
```
## 资源测试
```python
import pytest
from unilabos.resources.resource_tracker import (
ResourceTreeSet,
ResourceTreeInstance,
)
def test_resource_tree_creation():
"""测试资源树创建"""
tree_set = ResourceTreeSet()
# 添加资源
resource = {"id": "res_1", "name": "Resource 1"}
tree_set.add_resource(resource)
# 验证
assert len(tree_set.all_nodes) == 1
assert tree_set.get_resource("res_1") is not None
def test_resource_tree_merge():
"""测试资源树合并"""
local_set = ResourceTreeSet()
remote_set = ResourceTreeSet()
# 设置数据...
local_set.merge_remote_resources(remote_set)
# 验证合并结果...
```
## ROS 消息测试
```python
import pytest
from unilabos.ros.msgs.message_converter import (
convert_to_ros_msg,
convert_from_ros_msg_with_mapping,
msg_converter_manager,
)
def test_message_conversion():
"""测试消息转换"""
# Python -> ROS
python_data = {"id": "test", "value": 42}
ros_msg = convert_to_ros_msg(python_data, MyMsgType)
assert ros_msg.id == "test"
assert ros_msg.value == 42
# ROS -> Python
result = convert_from_ros_msg_with_mapping(ros_msg, mapping)
assert result["id"] == "test"
```
## 协议测试
```python
import pytest
import networkx as nx
from unilabos.compile.stir_protocol import (
generate_stir_protocol,
extract_vessel_id,
)
@pytest.fixture
def topology_graph():
"""创建测试拓扑图"""
G = nx.DiGraph()
G.add_node("flask_1", **{"class": "flask"})
G.add_node("stirrer_1", **{"class": "virtual_stirrer"})
G.add_edge("stirrer_1", "flask_1")
return G
def test_generate_stir_protocol(topology_graph):
"""测试搅拌协议生成"""
actions = generate_stir_protocol(
G=topology_graph,
vessel="flask_1",
time="5 min",
stir_speed=300.0
)
assert len(actions) == 1
assert actions[0]["device_id"] == "stirrer_1"
assert actions[0]["action_name"] == "stir"
def test_extract_vessel_id():
"""测试vessel_id提取"""
# 字典格式
assert extract_vessel_id({"id": "flask_1"}) == "flask_1"
# 字符串格式
assert extract_vessel_id("flask_2") == "flask_2"
# 空值
assert extract_vessel_id("") == ""
```
## 测试标记
```python
# 慢速测试
@pytest.mark.slow
def test_long_running():
pass
# 需要网络
@pytest.mark.network
def test_network_call():
pass
# 需要ROS
@pytest.mark.ros
def test_ros_feature():
pass
```
运行特定标记的测试:
```bash
pytest -m "not slow" # 排除慢速测试
pytest -m ros # 仅ROS测试
```
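Custom marks should also be registered so pytest does not warn about unknown markers. A minimal sketch using a `conftest.py` hook (the project may instead declare them in `pytest.ini` or `pyproject.toml`):
```python
# conftest.py
def pytest_configure(config):
    config.addinivalue_line("markers", "slow: long-running tests")
    config.addinivalue_line("markers", "network: tests that need network access")
    config.addinivalue_line("markers", "ros: tests that need a ROS 2 environment")
```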
## 覆盖率
```bash
# 生成覆盖率报告
pytest --cov=unilabos tests/
# HTML报告
pytest --cov=unilabos --cov-report=html tests/
```
## 最佳实践
1. **测试命名**: `test_{功能}_{场景}_{预期结果}`
2. **独立性**: 每个测试独立运行,不依赖其他测试
3. **Mock外部依赖**: 使用 unittest.mock 模拟外部服务
4. **参数化**: 使用 `@pytest.mark.parametrize` 减少重复代码
5. **fixtures**: 使用 fixtures 共享测试设置
6. **断言清晰**: 每个断言只验证一件事

View File

@@ -1,353 +0,0 @@
---
description: Uni-Lab-OS 实验室自动化平台开发规范 - 核心规则
globs: ["**/*.py", "**/*.yaml", "**/*.json"]
---
# Uni-Lab-OS 项目开发规范
## 项目概述
Uni-Lab-OS 是一个实验室自动化操作系统,用于连接和控制各种实验设备,实现实验工作流的自动化和标准化。
## 技术栈
- **Python 3.11** - 核心开发语言
- **ROS 2** - 设备通信中间件 (rclpy)
- **Conda/Mamba** - 包管理 (robostack-staging, conda-forge)
- **FastAPI** - Web API 服务
- **WebSocket** - 实时通信
- **NetworkX** - 拓扑图管理
- **YAML** - 配置和注册表定义
- **PyLabRobot** - 实验室自动化库集成
- **pytest** - 测试框架
- **asyncio** - 异步编程
## 项目结构
```
unilabos/
├── app/ # 应用入口、Web服务、后端
├── compile/ # 协议编译器 (stir, add, filter 等)
├── config/ # 配置管理
├── devices/ # 设备驱动 (真实/虚拟)
├── device_comms/ # 设备通信协议
├── device_mesh/ # 3D网格和可视化
├── registry/ # 设备和资源类型注册表 (YAML)
├── resources/ # 资源定义
├── ros/ # ROS 2 集成
├── utils/ # 工具函数
└── workflow/ # 工作流管理
```
## Code Style
### Python Style
1. **Type annotations**: every function must have type annotations
```python
def transfer_liquid(
source: str,
destination: str,
volume: float,
**kwargs
) -> List[Dict[str, Any]]:
```
2. **Docstrings**: use Google-style docstrings
```python
def initialize(self) -> bool:
"""
初始化设备
Returns:
bool: 初始化是否成功
"""
```
3. **Import order** (see the example after this list)
- Standard library
- Third-party packages
- ROS-related (rclpy, unilabos_msgs)
- Project-internal modules
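An import block in this order might look like the following; the specific modules are illustrative only:
```python
# Standard library
import asyncio
import logging
from typing import Any, Dict

# Third-party packages
import networkx as nx
import yaml

# ROS-related
import rclpy
from unilabos_msgs.msg import Resource

# Project-internal modules
from unilabos.ros.nodes.base_device_node import BaseROS2DeviceNode
from unilabos.utils.log import logger
```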
### 异步编程
1. 设备操作方法使用 `async def`
2. 使用 `await self._ros_node.sleep()` 而非 `asyncio.sleep()`
3. 长时间运行操作需提供进度反馈
```python
async def stir(self, stir_time: float, stir_speed: float, **kwargs) -> bool:
"""执行搅拌操作"""
start_time = time_module.time()
while True:
elapsed = time_module.time() - start_time
remaining = max(0, stir_time - elapsed)
self.data.update({
"remaining_time": remaining,
"status": f"搅拌中: {stir_speed} RPM"
})
if remaining <= 0:
break
await self._ros_node.sleep(1.0)
return True
```
### 日志规范
使用项目自定义日志系统:
```python
from unilabos.utils.log import logger, info, debug, warning, error, trace
# 在设备类中使用
self.logger = logging.getLogger(f"DeviceName.{self.device_id}")
self.logger.info("设备初始化完成")
```
## 设备驱动开发
### 设备类结构
```python
from unilabos.ros.nodes.base_device_node import BaseROS2DeviceNode
class MyDevice:
"""设备驱动类"""
_ros_node: BaseROS2DeviceNode
def __init__(self, device_id: str = None, config: Dict[str, Any] = None, **kwargs):
self.device_id = device_id or "unknown_device"
self.config = config or {}
self.data = {} # 设备状态数据
def post_init(self, ros_node: BaseROS2DeviceNode):
"""ROS节点注入"""
self._ros_node = ros_node
async def initialize(self) -> bool:
"""初始化设备"""
pass
async def cleanup(self) -> bool:
"""清理设备"""
pass
# 状态属性 - 自动发布为 ROS Topic
@property
def status(self) -> str:
return self.data.get("status", "待机")
```
### 状态属性装饰器
```python
from unilabos.utils.decorator import topic_config
class MyDevice:
@property
@topic_config(period=1.0, qos=10) # 每秒发布一次
def temperature(self) -> float:
return self._temperature
```
### 虚拟设备
虚拟设备放置在 `unilabos/devices/virtual/` 目录下,命名为 `virtual_*.py`
## 注册表配置
### 设备注册表 (YAML)
位置: `unilabos/registry/devices/*.yaml`
```yaml
my_device_type:
category:
- my_category
description: "设备描述"
version: "1.0.0"
class:
module: "unilabos.devices.my_device:MyDevice"
type: python
status_types:
status: String
temperature: Float64
action_value_mappings:
auto-initialize:
type: UniLabJsonCommandAsync
goal: {}
feedback: {}
result: {}
schema: {...}
```
### 资源注册表 (YAML)
位置: `unilabos/registry/resources/**/*.yaml`
```yaml
my_container:
category:
- container
class:
module: "unilabos.resources.my_resource:MyContainer"
type: pylabrobot
version: "1.0.0"
```
## 协议编译器
位置: `unilabos/compile/*_protocol.py`
### 协议生成函数模板
```python
from typing import List, Dict, Any, Union
import networkx as nx
def generate_my_protocol(
G: nx.DiGraph,
vessel: Union[str, dict],
param1: float = 0.0,
**kwargs
) -> List[Dict[str, Any]]:
"""
生成操作协议序列
Args:
G: 物理拓扑图
vessel: 容器ID或字典
param1: 参数1
Returns:
List[Dict]: 动作序列
"""
# 提取vessel_id
vessel_id = vessel if isinstance(vessel, str) else vessel.get("id", "")
# 查找设备
device_id = find_connected_device(G, vessel_id)
# 生成动作
action_sequence = [{
"device_id": device_id,
"action_name": "my_action",
"action_kwargs": {
"vessel": {"id": vessel_id},
"param1": float(param1)
}
}]
return action_sequence
```
## 测试规范
### 测试文件位置
- 单元测试: `tests/` 目录
- 设备测试: `tests/devices/`
- 资源测试: `tests/resources/`
- ROS消息测试: `tests/ros/msgs/`
### 测试命名
```python
# tests/devices/my_device/test_my_device.py
import pytest
def test_device_initialization():
"""测试设备初始化"""
pass
def test_device_action():
"""测试设备动作"""
pass
```
## 错误处理
```python
from unilabos.utils.exception import UniLabException
try:
result = await device.execute_action()
except ValueError as e:
self.logger.error(f"参数错误: {e}")
self.data["status"] = "错误: 参数无效"
return False
except Exception as e:
self.logger.error(f"执行失败: {e}")
raise
```
## 配置管理
```python
from unilabos.config.config import BasicConfig, HTTPConfig
# 读取配置
port = BasicConfig.port
is_host = BasicConfig.is_host_mode
# 配置文件: local_config.py
```
## 常用工具
### 单例模式
```python
from unilabos.utils.decorator import singleton
@singleton
class MyManager:
pass
```
### 类型检查
```python
from unilabos.utils.type_check import NoAliasDumper
yaml.dump(data, f, Dumper=NoAliasDumper)
```
### 导入管理
```python
from unilabos.utils.import_manager import get_class
device_class = get_class("unilabos.devices.my_device:MyDevice")
```
## Git 提交规范
提交信息格式:
```
<type>(<scope>): <subject>
<body>
```
类型:
- `feat`: 新功能
- `fix`: 修复bug
- `docs`: 文档更新
- `refactor`: 重构
- `test`: 测试相关
- `chore`: 构建/工具相关
示例:
```
feat(devices): 添加虚拟搅拌器设备
- 实现VirtualStirrer类
- 支持定时搅拌和持续搅拌模式
- 添加速度验证逻辑
```

View File

@@ -1,188 +1,26 @@
# ============================================================
# Uni-Lab-OS Cursor Ignore 配置,控制 Cursor AI 的文件索引范围
# ============================================================
# ==================== 敏感配置文件 ====================
# 本地配置(可能包含密钥)
**/local_config.py
test_config.py
local_test*.py
# 环境变量和密钥
.env
.env.*
**/.certs/
*.pem
*.key
credentials.json
secrets.yaml
# ==================== 二进制和 3D 模型文件 ====================
# 3D 模型文件(无需索引)
*.stl
*.dae
*.glb
*.gltf
*.obj
*.fbx
*.blend
# URDF/Xacro 机器人描述文件(大型XML)
*.xacro
# 图片文件
*.png
*.jpg
*.jpeg
*.gif
*.webp
*.ico
*.svg
*.bmp
# 压缩包
*.zip
*.tar
*.tar.gz
*.tgz
*.bz2
*.rar
*.7z
# ==================== Python 生成文件 ====================
__pycache__/
*.py[cod]
*$py.class
*.so
*.pyd
*.egg
*.egg-info/
.eggs/
dist/
build/
*.manifest
*.spec
# ==================== IDE 和编辑器 ====================
.idea/
.vscode/
*.swp
*.swo
*~
.#*
# ==================== 测试和覆盖率 ====================
.pytest_cache/
.coverage
.coverage.*
htmlcov/
.tox/
.nox/
coverage.xml
*.cover
# ==================== 虚拟环境 ====================
.venv/
venv/
env/
ENV/
# ==================== ROS 2 生成文件 ====================
# ROS 构建目录
build/
install/
log/
logs/
devel/
# ROS 消息生成
msg_gen/
srv_gen/
msg/*Action.msg
msg/*ActionFeedback.msg
msg/*ActionGoal.msg
msg/*ActionResult.msg
msg/*Feedback.msg
msg/*Goal.msg
msg/*Result.msg
msg/_*.py
srv/_*.py
build_isolated/
devel_isolated/
# ROS 动态配置
*.cfgc
/cfg/cpp/
/cfg/*.py
# ==================== 项目特定目录 ====================
# 工作数据目录
unilabos_data/
# 临时和输出目录
temp/
output/
cursor_docs/
configs/
# 文档构建
docs/_build/
/site
# ==================== 大型数据文件 ====================
# 点云数据
*.pcd
# GraphML 图形文件
*.graphml
# 日志文件
*.log
# 数据库
*.sqlite3
*.db
# Jupyter 检查点
.ipynb_checkpoints/
# ==================== 设备网格资源 ====================
# 3D 网格文件目录(包含大量 STL/DAE 文件)
unilabos/device_mesh/devices/**/*.stl
unilabos/device_mesh/devices/**/*.dae
unilabos/device_mesh/resources/**/*.stl
unilabos/device_mesh/resources/**/*.glb
unilabos/device_mesh/resources/**/*.xacro
# RViz 配置
*.rviz
# ==================== 系统文件 ====================
.DS_Store
Thumbs.db
desktop.ini
# ==================== 锁文件 ====================
poetry.lock
Pipfile.lock
pdm.lock
package-lock.json
yarn.lock
# ==================== 类型检查缓存 ====================
.mypy_cache/
.dmypy.json
.pytype/
.pyre/
.conda
# .github
.idea
# .vscode
output
pylabrobot_repo
recipes
scripts
service
temp
# unilabos/test
# unilabos/app/web
unilabos/device_mesh
unilabos_data
unilabos_msgs
unilabos.egg-info
CONTRIBUTORS
# LICENSE
MANIFEST.in
pyrightconfig.json
# ==================== 其他 ====================
# Catkin
CATKIN_IGNORE
# Eclipse/Qt
.project
.cproject
CMakeLists.txt.user
*.user
qtcreator-*
# README.md
# README_zh.md
setup.py
setup.cfg
.gitattrubutes
**/__pycache__

View File

@@ -1,19 +0,0 @@
version: 2
updates:
# GitHub Actions
- package-ecosystem: "github-actions"
directory: "/"
target-branch: "dev"
schedule:
interval: "weekly"
day: "monday"
time: "06:00"
open-pull-requests-limit: 5
reviewers:
- "msgcenterpy-team"
labels:
- "dependencies"
- "github-actions"
commit-message:
prefix: "ci"
include: "scope"

View File

@@ -1,67 +0,0 @@
name: CI Check
on:
push:
branches: [main, dev]
pull_request:
branches: [main, dev]
jobs:
registry-check:
runs-on: windows-latest
env:
# Fix Unicode encoding issue on Windows runner (cp1252 -> utf-8)
PYTHONIOENCODING: utf-8
PYTHONUTF8: 1
defaults:
run:
shell: cmd
steps:
- uses: actions/checkout@v6
with:
fetch-depth: 0
- name: Setup Miniforge
uses: conda-incubator/setup-miniconda@v3
with:
miniforge-version: latest
use-mamba: true
channels: robostack-staging,conda-forge,uni-lab
channel-priority: flexible
activate-environment: check-env
auto-update-conda: false
show-channel-urls: true
- name: Install ROS dependencies, uv and unilabos-msgs
run: |
echo Installing ROS dependencies...
mamba install -n check-env conda-forge::uv conda-forge::opencv robostack-staging::ros-humble-ros-core robostack-staging::ros-humble-action-msgs robostack-staging::ros-humble-std-msgs robostack-staging::ros-humble-geometry-msgs robostack-staging::ros-humble-control-msgs robostack-staging::ros-humble-nav2-msgs uni-lab::ros-humble-unilabos-msgs robostack-staging::ros-humble-cv-bridge robostack-staging::ros-humble-vision-opencv robostack-staging::ros-humble-tf-transformations robostack-staging::ros-humble-moveit-msgs robostack-staging::ros-humble-tf2-ros robostack-staging::ros-humble-tf2-ros-py conda-forge::transforms3d -c robostack-staging -c conda-forge -c uni-lab -y
- name: Install pip dependencies and unilabos
run: |
call conda activate check-env
echo Installing pip dependencies...
uv pip install -r unilabos/utils/requirements.txt
uv pip install pywinauto git+https://github.com/Xuwznln/pylabrobot.git
uv pip uninstall enum34 || echo enum34 not installed, skipping
uv pip install .
- name: Run check mode (complete_registry)
run: |
call conda activate check-env
echo Running check mode...
python -m unilabos --check_mode --skip_env_check
- name: Check for uncommitted changes
shell: bash
run: |
if ! git diff --exit-code; then
echo "::error::检测到文件变化!请先在本地运行 'python -m unilabos --complete_registry' 并提交变更"
echo "变化的文件:"
git diff --name-only
exit 1
fi
echo "检查通过:无文件变化"

View File

@@ -13,11 +13,6 @@ on:
required: false
default: 'win-64'
type: string
build_full:
description: '是否构建完整版 unilabos-full (默认构建轻量版 unilabos)'
required: false
default: false
type: boolean
jobs:
build-conda-pack:
@@ -62,7 +57,7 @@ jobs:
echo "should_build=false" >> $GITHUB_OUTPUT
fi
- uses: actions/checkout@v6
- uses: actions/checkout@v4
if: steps.should_build.outputs.should_build == 'true'
with:
ref: ${{ github.event.inputs.branch }}
@@ -74,7 +69,7 @@ jobs:
with:
miniforge-version: latest
use-mamba: true
python-version: '3.11.14'
python-version: '3.11.11'
channels: conda-forge,robostack-staging,uni-lab,defaults
channel-priority: flexible
activate-environment: unilab
@@ -86,14 +81,7 @@ jobs:
run: |
echo Installing unilabos and dependencies to unilab environment...
echo Using mamba for faster and more reliable dependency resolution...
echo Build full: ${{ github.event.inputs.build_full }}
if "${{ github.event.inputs.build_full }}"=="true" (
echo Installing unilabos-full ^(complete package^)...
mamba install -n unilab uni-lab::unilabos-full conda-pack -c uni-lab -c robostack-staging -c conda-forge -y
) else (
echo Installing unilabos ^(minimal package^)...
mamba install -n unilab uni-lab::unilabos conda-pack -c uni-lab -c robostack-staging -c conda-forge -y
)
mamba install -n unilab uni-lab::unilabos conda-pack -c uni-lab -c robostack-staging -c conda-forge -y
- name: Install conda-pack, unilabos and dependencies (Unix)
if: steps.should_build.outputs.should_build == 'true' && matrix.platform != 'win-64'
@@ -101,14 +89,7 @@ jobs:
run: |
echo "Installing unilabos and dependencies to unilab environment..."
echo "Using mamba for faster and more reliable dependency resolution..."
echo "Build full: ${{ github.event.inputs.build_full }}"
if [[ "${{ github.event.inputs.build_full }}" == "true" ]]; then
echo "Installing unilabos-full (complete package)..."
mamba install -n unilab uni-lab::unilabos-full conda-pack -c uni-lab -c robostack-staging -c conda-forge -y
else
echo "Installing unilabos (minimal package)..."
mamba install -n unilab uni-lab::unilabos conda-pack -c uni-lab -c robostack-staging -c conda-forge -y
fi
mamba install -n unilab uni-lab::unilabos conda-pack -c uni-lab -c robostack-staging -c conda-forge -y
- name: Get latest ros-humble-unilabos-msgs version (Windows)
if: steps.should_build.outputs.should_build == 'true' && matrix.platform == 'win-64'
@@ -312,7 +293,7 @@ jobs:
- name: Upload distribution package
if: steps.should_build.outputs.should_build == 'true'
uses: actions/upload-artifact@v6
uses: actions/upload-artifact@v4
with:
name: unilab-pack-${{ matrix.platform }}-${{ github.event.inputs.branch }}
path: dist-package/
@@ -327,12 +308,7 @@ jobs:
echo ==========================================
echo Platform: ${{ matrix.platform }}
echo Branch: ${{ github.event.inputs.branch }}
echo Python version: 3.11.14
if "${{ github.event.inputs.build_full }}"=="true" (
echo Package: unilabos-full ^(complete^)
) else (
echo Package: unilabos ^(minimal^)
)
echo Python version: 3.11.11
echo.
echo Distribution package contents:
dir dist-package
@@ -352,12 +328,7 @@ jobs:
echo "=========================================="
echo "Platform: ${{ matrix.platform }}"
echo "Branch: ${{ github.event.inputs.branch }}"
echo "Python version: 3.11.14"
if [[ "${{ github.event.inputs.build_full }}" == "true" ]]; then
echo "Package: unilabos-full (complete)"
else
echo "Package: unilabos (minimal)"
fi
echo "Python version: 3.11.11"
echo ""
echo "Distribution package contents:"
ls -lh dist-package/

View File

@@ -1,12 +1,10 @@
name: Deploy Docs
on:
# 在 CI Check 成功后自动触发(仅 main 分支)
workflow_run:
workflows: ["CI Check"]
types: [completed]
push:
branches: [main]
pull_request:
branches: [main]
# 手动触发
workflow_dispatch:
inputs:
branch:
@@ -35,19 +33,12 @@ concurrency:
jobs:
# Build documentation
build:
# 只在以下情况运行:
# 1. workflow_run 触发且 CI Check 成功
# 2. 手动触发
if: |
github.event_name == 'workflow_dispatch' ||
(github.event_name == 'workflow_run' && github.event.workflow_run.conclusion == 'success')
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v6
uses: actions/checkout@v4
with:
# workflow_run 时使用触发工作流的分支,手动触发时使用输入的分支
ref: ${{ github.event.workflow_run.head_branch || github.event.inputs.branch || github.ref }}
ref: ${{ github.event.inputs.branch || github.ref }}
fetch-depth: 0
- name: Setup Miniforge (with mamba)
@@ -55,7 +46,7 @@ jobs:
with:
miniforge-version: latest
use-mamba: true
python-version: '3.11.14'
python-version: '3.11.11'
channels: conda-forge,robostack-staging,uni-lab,defaults
channel-priority: flexible
activate-environment: unilab
@@ -84,10 +75,8 @@ jobs:
- name: Setup Pages
id: pages
uses: actions/configure-pages@v5
if: |
github.event.workflow_run.head_branch == 'main' ||
(github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true')
uses: actions/configure-pages@v4
if: github.ref == 'refs/heads/main' || (github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true')
- name: Build Sphinx documentation
run: |
@@ -105,18 +94,14 @@ jobs:
test -f docs/_build/html/index.html && echo "✓ index.html exists" || echo "✗ index.html missing"
- name: Upload build artifacts
uses: actions/upload-pages-artifact@v4
if: |
github.event.workflow_run.head_branch == 'main' ||
(github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true')
uses: actions/upload-pages-artifact@v3
if: github.ref == 'refs/heads/main' || (github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true')
with:
path: docs/_build/html
# Deploy to GitHub Pages
deploy:
if: |
github.event.workflow_run.head_branch == 'main' ||
(github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true')
if: github.ref == 'refs/heads/main' || (github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true')
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}

View File

@@ -1,16 +1,11 @@
name: Multi-Platform Conda Build
on:
# 在 CI Check 工作流完成后触发(仅限 main/dev 分支)
workflow_run:
workflows: ["CI Check"]
types:
- completed
branches: [main, dev]
# 支持 tag 推送(不依赖 CI Check)
push:
branches: [main, dev]
tags: ['v*']
# 手动触发
pull_request:
branches: [main, dev]
workflow_dispatch:
inputs:
platforms:
@@ -22,37 +17,9 @@ on:
required: false
default: false
type: boolean
skip_ci_check:
description: '跳过等待 CI Check (手动触发时可选)'
required: false
default: false
type: boolean
jobs:
# 等待 CI Check 完成的 job (仅用于 workflow_run 触发)
wait-for-ci:
runs-on: ubuntu-latest
if: github.event_name == 'workflow_run'
outputs:
should_continue: ${{ steps.check.outputs.should_continue }}
steps:
- name: Check CI status
id: check
run: |
if [[ "${{ github.event.workflow_run.conclusion }}" == "success" ]]; then
echo "should_continue=true" >> $GITHUB_OUTPUT
echo "CI Check passed, proceeding with build"
else
echo "should_continue=false" >> $GITHUB_OUTPUT
echo "CI Check did not succeed (status: ${{ github.event.workflow_run.conclusion }}), skipping build"
fi
build:
needs: [wait-for-ci]
# 运行条件:workflow_run 触发且 CI 成功,或者其他触发方式
if: |
always() &&
(needs.wait-for-ci.result == 'skipped' || needs.wait-for-ci.outputs.should_continue == 'true')
strategy:
fail-fast: false
matrix:
@@ -77,10 +44,8 @@ jobs:
shell: bash -l {0}
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
with:
# 如果是 workflow_run 触发,使用触发 CI Check 的 commit
ref: ${{ github.event.workflow_run.head_sha || github.ref }}
fetch-depth: 0
- name: Check if platform should be built
@@ -104,6 +69,7 @@ jobs:
channels: conda-forge,robostack-staging,defaults
channel-priority: strict
activate-environment: build-env
auto-activate-base: false
auto-update-conda: false
show-channel-urls: true
@@ -149,7 +115,7 @@ jobs:
- name: Upload conda package artifacts
if: steps.should_build.outputs.should_build == 'true'
uses: actions/upload-artifact@v6
uses: actions/upload-artifact@v4
with:
name: conda-package-${{ matrix.platform }}
path: conda-packages-temp

View File

@@ -1,62 +1,25 @@
name: UniLabOS Conda Build
on:
# 在 CI Check 成功后自动触发
workflow_run:
workflows: ["CI Check"]
types: [completed]
branches: [main, dev]
# 标签推送时直接触发(发布版本)
push:
branches: [main, dev]
tags: ['v*']
# 手动触发
pull_request:
branches: [main, dev]
workflow_dispatch:
inputs:
platforms:
description: '选择构建平台 (逗号分隔): linux-64, osx-64, osx-arm64, win-64'
required: false
default: 'linux-64'
build_full:
description: '是否构建 unilabos-full 完整包 (默认只构建 unilabos 基础包)'
required: false
default: false
type: boolean
upload_to_anaconda:
description: '是否上传到Anaconda.org'
required: false
default: false
type: boolean
skip_ci_check:
description: '跳过等待 CI Check (手动触发时可选)'
required: false
default: false
type: boolean
jobs:
# 等待 CI Check 完成的 job (仅用于 workflow_run 触发)
wait-for-ci:
runs-on: ubuntu-latest
if: github.event_name == 'workflow_run'
outputs:
should_continue: ${{ steps.check.outputs.should_continue }}
steps:
- name: Check CI status
id: check
run: |
if [[ "${{ github.event.workflow_run.conclusion }}" == "success" ]]; then
echo "should_continue=true" >> $GITHUB_OUTPUT
echo "CI Check passed, proceeding with build"
else
echo "should_continue=false" >> $GITHUB_OUTPUT
echo "CI Check did not succeed (status: ${{ github.event.workflow_run.conclusion }}), skipping build"
fi
build:
needs: [wait-for-ci]
# 运行条件:workflow_run 触发且 CI 成功,或者其他触发方式
if: |
always() &&
(needs.wait-for-ci.result == 'skipped' || needs.wait-for-ci.outputs.should_continue == 'true')
strategy:
fail-fast: false
matrix:
@@ -77,10 +40,8 @@ jobs:
shell: bash -l {0}
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
with:
# 如果是 workflow_run 触发,使用触发 CI Check 的 commit
ref: ${{ github.event.workflow_run.head_sha || github.ref }}
fetch-depth: 0
- name: Check if platform should be built
@@ -104,6 +65,7 @@ jobs:
channels: conda-forge,robostack-staging,uni-lab,defaults
channel-priority: strict
activate-environment: build-env
auto-activate-base: false
auto-update-conda: false
show-channel-urls: true
@@ -119,61 +81,12 @@ jobs:
conda list | grep -E "(rattler-build|anaconda-client)"
echo "Platform: ${{ matrix.platform }}"
echo "OS: ${{ matrix.os }}"
echo "Build full package: ${{ github.event.inputs.build_full || 'false' }}"
echo "Building packages:"
echo " - unilabos-env (environment dependencies)"
echo " - unilabos (with pip package)"
if [[ "${{ github.event.inputs.build_full }}" == "true" ]]; then
echo " - unilabos-full (complete package)"
fi
echo "Building UniLabOS package"
- name: Build unilabos-env (conda environment only, noarch)
- name: Build conda package
if: steps.should_build.outputs.should_build == 'true'
run: |
echo "Building unilabos-env (conda environment dependencies)..."
rattler-build build -r .conda/environment/recipe.yaml -c uni-lab -c robostack-staging -c conda-forge
- name: Upload unilabos-env to Anaconda.org (if enabled)
if: steps.should_build.outputs.should_build == 'true' && github.event.inputs.upload_to_anaconda == 'true'
run: |
echo "Uploading unilabos-env to uni-lab organization..."
for package in $(find ./output -name "unilabos-env*.conda"); do
anaconda -t ${{ secrets.ANACONDA_API_TOKEN }} upload --user uni-lab --force "$package"
done
- name: Build unilabos (with pip package)
if: steps.should_build.outputs.should_build == 'true'
run: |
echo "Building unilabos package..."
# 如果已上传到 Anaconda,从 uni-lab channel 获取 unilabos-env,否则从本地 output 获取
rattler-build build -r .conda/base/recipe.yaml -c uni-lab -c robostack-staging -c conda-forge --channel ./output
- name: Upload unilabos to Anaconda.org (if enabled)
if: steps.should_build.outputs.should_build == 'true' && github.event.inputs.upload_to_anaconda == 'true'
run: |
echo "Uploading unilabos to uni-lab organization..."
for package in $(find ./output -name "unilabos-0*.conda" -o -name "unilabos-[0-9]*.conda"); do
anaconda -t ${{ secrets.ANACONDA_API_TOKEN }} upload --user uni-lab --force "$package"
done
- name: Build unilabos-full - Only when explicitly requested
if: |
steps.should_build.outputs.should_build == 'true' &&
github.event.inputs.build_full == 'true'
run: |
echo "Building unilabos-full package on ${{ matrix.platform }}..."
rattler-build build -r .conda/full/recipe.yaml -c uni-lab -c robostack-staging -c conda-forge --channel ./output
- name: Upload unilabos-full to Anaconda.org (if enabled)
if: |
steps.should_build.outputs.should_build == 'true' &&
github.event.inputs.build_full == 'true' &&
github.event.inputs.upload_to_anaconda == 'true'
run: |
echo "Uploading unilabos-full to uni-lab organization..."
for package in $(find ./output -name "unilabos-full*.conda"); do
anaconda -t ${{ secrets.ANACONDA_API_TOKEN }} upload --user uni-lab --force "$package"
done
rattler-build build -r .conda/recipe.yaml -c uni-lab -c robostack-staging -c conda-forge
- name: List built packages
if: steps.should_build.outputs.should_build == 'true'
@@ -195,9 +108,17 @@ jobs:
- name: Upload conda package artifacts
if: steps.should_build.outputs.should_build == 'true'
uses: actions/upload-artifact@v6
uses: actions/upload-artifact@v4
with:
name: conda-package-unilabos-${{ matrix.platform }}
path: conda-packages-temp
if-no-files-found: warn
retention-days: 30
- name: Upload to Anaconda.org (uni-lab organization)
if: github.event.inputs.upload_to_anaconda == 'true'
run: |
for package in $(find ./output -name "*.conda"); do
echo "Uploading $package to uni-lab organization..."
anaconda -t ${{ secrets.ANACONDA_API_TOKEN }} upload --user uni-lab --force "$package"
done

.gitignore (vendored, 1 line changed)
View File

@@ -4,7 +4,6 @@ temp/
output/
unilabos_data/
pyrightconfig.json
.cursorignore
## Python
# Byte-compiled / optimized / DLL files

View File

@@ -1,5 +1,4 @@
recursive-include unilabos/test *
recursive-include unilabos/utils *
recursive-include unilabos/registry *.yaml
recursive-include unilabos/app/web/static *
recursive-include unilabos/app/web/templates *

View File

@@ -31,46 +31,26 @@ Detailed documentation can be found at:
## Quick Start
### 1. Setup Conda Environment
1. Setup Conda Environment
Uni-Lab-OS recommends using `mamba` for environment management. Choose the package that fits your needs:
| Package | Use Case | Contents |
|---------|----------|----------|
| `unilabos` | **Recommended for most users** | Complete package, ready to use |
| `unilabos-env` | Developers (editable install) | Environment only, install unilabos via pip |
| `unilabos-full` | Simulation/Visualization | unilabos + ROS2 Desktop + Gazebo + MoveIt |
Uni-Lab-OS recommends using `mamba` for environment management:
```bash
# Create new environment
mamba create -n unilab python=3.11.14
mamba create -n unilab python=3.11.11
mamba activate unilab
# Option A: Standard installation (recommended for most users)
mamba install uni-lab::unilabos -c robostack-staging -c conda-forge
# Option B: For developers (editable mode development)
mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge
# Then install unilabos and dependencies:
git clone https://github.com/deepmodeling/Uni-Lab-OS.git && cd Uni-Lab-OS
pip install -e .
uv pip install -r unilabos/utils/requirements.txt
# Option C: Full installation (simulation/visualization)
mamba install uni-lab::unilabos-full -c robostack-staging -c conda-forge
mamba install -n unilab uni-lab::unilabos -c robostack-staging -c conda-forge
```
**When to use which?**
- **unilabos**: Standard installation for production deployment and general usage (recommended)
- **unilabos-env**: For developers who need `pip install -e .` editable mode, modify source code
- **unilabos-full**: For simulation (Gazebo), visualization (rviz2), and Jupyter notebooks
### 2. Clone Repository (Optional, for developers)
2. Install Dev Uni-Lab-OS
```bash
# Clone the repository (only needed for development or examples)
# Clone the repository
git clone https://github.com/deepmodeling/Uni-Lab-OS.git
cd Uni-Lab-OS
# Install Uni-Lab-OS
pip install .
```
3. Start Uni-Lab System

View File

@@ -31,46 +31,26 @@ Uni-Lab-OS 是一个用于实验室自动化的综合平台,旨在连接和控
## 快速开始
### 1. 配置 Conda 环境
1. 配置 Conda 环境
Uni-Lab-OS 建议使用 `mamba` 管理环境。根据您的需求选择合适的安装包:
| 安装包 | 适用场景 | 包含内容 |
|--------|----------|----------|
| `unilabos` | **推荐大多数用户** | 完整安装包,开箱即用 |
| `unilabos-env` | 开发者(可编辑安装) | 仅环境依赖,通过 pip 安装 unilabos |
| `unilabos-full` | 仿真/可视化 | unilabos + ROS2 桌面版 + Gazebo + MoveIt |
Uni-Lab-OS 建议使用 `mamba` 管理环境。根据您的操作系统选择适当的环境文件:
```bash
# 创建新环境
mamba create -n unilab python=3.11.14
mamba create -n unilab python=3.11.11
mamba activate unilab
# 方案 A:标准安装(推荐大多数用户)
mamba install uni-lab::unilabos -c robostack-staging -c conda-forge
# 方案 B:开发者环境(可编辑模式开发)
mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge
# 然后安装 unilabos 和依赖:
git clone https://github.com/deepmodeling/Uni-Lab-OS.git && cd Uni-Lab-OS
pip install -e .
uv pip install -r unilabos/utils/requirements.txt
# 方案 C:完整安装(仿真/可视化)
mamba install uni-lab::unilabos-full -c robostack-staging -c conda-forge
mamba install -n unilab uni-lab::unilabos -c robostack-staging -c conda-forge
```
**如何选择?**
- **unilabos**:标准安装,适用于生产部署和日常使用(推荐)
- **unilabos-env**:开发者使用,支持 `pip install -e .` 可编辑模式,可修改源代码
- **unilabos-full**需要仿真Gazebo、可视化rviz2或 Jupyter Notebook
### 2. 克隆仓库(可选,供开发者使用)
2. 安装开发版 Uni-Lab-OS:
```bash
# 克隆仓库(仅开发或查看示例时需要)
# 克隆仓库
git clone https://github.com/deepmodeling/Uni-Lab-OS.git
cd Uni-Lab-OS
# 安装 Uni-Lab-OS
pip install .
```
3. 启动 Uni-Lab 系统

View File

@@ -31,14 +31,6 @@
详细的安装步骤请参考 [安装指南](installation.md)。
**选择合适的安装包:**
| 安装包 | 适用场景 | 包含组件 |
|--------|----------|----------|
| `unilabos` | **推荐大多数用户**,生产部署 | 完整安装包,开箱即用 |
| `unilabos-env` | 开发者(可编辑安装) | 仅环境依赖,通过 pip 安装 unilabos |
| `unilabos-full` | 仿真/可视化 | unilabos + 完整 ROS2 桌面版 + Gazebo + MoveIt |
**关键步骤:**
```bash
@@ -46,30 +38,15 @@
# 下载 Miniforge: https://github.com/conda-forge/miniforge/releases
# 2. 创建 Conda 环境
mamba create -n unilab python=3.11.14
mamba create -n unilab python=3.11.11
# 3. 激活环境
mamba activate unilab
# 4. 安装 Uni-Lab-OS(选择其一)
# 方案 A标准安装推荐大多数用户
# 4. 安装 Uni-Lab-OS
mamba install uni-lab::unilabos -c robostack-staging -c conda-forge
# 方案 B开发者环境可编辑模式开发
mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge
pip install -e /path/to/Uni-Lab-OS # 可编辑安装
uv pip install -r unilabos/utils/requirements.txt # 安装 pip 依赖
# 方案 C完整版仿真/可视化)
mamba install uni-lab::unilabos-full -c robostack-staging -c conda-forge
```
**选择建议:**
- **日常使用/生产部署**:使用 `unilabos`(推荐),完整功能,开箱即用
- **开发者**:使用 `unilabos-env` + `pip install -e .` + `uv pip install -r unilabos/utils/requirements.txt`,代码修改立即生效
- **仿真/可视化**:使用 `unilabos-full`,含 Gazebo、rviz2、MoveIt
#### 1.2 验证安装
```bash
@@ -439,9 +416,6 @@ unilab --ak your_ak --sk your_sk -g test/experiments/mock_devices/mock_all.json
1. 访问 Web 界面,进入"仪器耗材"模块
2. 在"仪器设备"区域找到并添加上述设备
3. 在"物料耗材"区域找到并添加容器
4. 在workstation中配置protocol_type包含PumpTransferProtocol
![添加Protocol类型](image/add_protocol.png)
![物料列表](image/material.png)
@@ -794,43 +768,7 @@ Waiting for host service...
详细的设备驱动编写指南请参考 [添加设备驱动](../developer_guide/add_device.md)。
#### 9.1 开发环境准备
**推荐使用 `unilabos-env` + `pip install -e .` + `uv pip install`** 进行设备开发:
```bash
# 1. 创建环境并安装 unilabos-envROS2 + conda 依赖 + uv
mamba create -n unilab python=3.11.14
conda activate unilab
mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge
# 2. 克隆代码
git clone https://github.com/deepmodeling/Uni-Lab-OS.git
cd Uni-Lab-OS
# 3. 以可编辑模式安装(推荐使用脚本,自动检测中文环境)
python scripts/dev_install.py
# 或手动安装:
pip install -e .
uv pip install -r unilabos/utils/requirements.txt
```
**为什么使用这种方式?**
- `unilabos-env` 提供 ROS2 核心组件和 uv通过 conda 安装,避免编译)
- `unilabos/utils/requirements.txt` 包含所有运行时需要的 pip 依赖
- `dev_install.py` 自动检测中文环境,中文系统自动使用清华镜像
- 使用 `uv` 替代 `pip`,安装速度更快
- 可编辑模式:代码修改**立即生效**,无需重新安装
**如果安装失败或速度太慢**,可以手动执行(使用清华镜像):
```bash
pip install -e . -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
uv pip install -r unilabos/utils/requirements.txt -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
```
#### 9.2 为什么需要自定义设备?
#### 9.1 为什么需要自定义设备?
Uni-Lab-OS 内置了常见设备,但您的实验室可能有特殊设备需要集成:
@@ -839,7 +777,7 @@ Uni-Lab-OS 内置了常见设备,但您的实验室可能有特殊设备需要
- 特殊的实验流程
- 第三方设备集成
#### 9.3 创建 Python 包
#### 9.2 创建 Python 包
为了方便开发和管理,建议为您的实验室创建独立的 Python 包。
@@ -876,7 +814,7 @@ touch my_lab_devices/my_lab_devices/__init__.py
touch my_lab_devices/my_lab_devices/devices/__init__.py
```
#### 9.4 创建 setup.py
#### 9.3 创建 setup.py
```python
# my_lab_devices/setup.py
@@ -907,7 +845,7 @@ setup(
)
```
#### 9.5 开发安装
#### 9.4 开发安装
使用 `-e` 参数进行可编辑安装,这样代码修改后立即生效:
@@ -922,7 +860,7 @@ pip install -e . -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
- 方便调试和测试
- 支持版本控制git
#### 9.6 编写设备驱动
#### 9.5 编写设备驱动
创建设备驱动文件:
@@ -1063,7 +1001,7 @@ class MyPump:
- **返回 Dict**:所有动作方法返回字典类型
- **文档字符串**:详细说明参数和功能
#### 9.7 测试设备驱动
#### 9.6 测试设备驱动
创建简单的测试脚本:

Binary file not shown.

Before: 81 KiB image

View File

@@ -13,26 +13,15 @@
- 开发者需要 Git 和基本的 Python 开发知识
- 自定义 msgs 需要 GitHub 账号
## 安装包选择
Uni-Lab-OS 提供三个安装包版本,根据您的需求选择:
| 安装包 | 适用场景 | 包含组件 | 磁盘占用 |
|--------|----------|----------|----------|
| **unilabos** | **推荐大多数用户**,生产部署 | 完整安装包,开箱即用 | ~2-3 GB |
| **unilabos-env** | 开发者环境(可编辑安装) | 仅环境依赖,通过 pip 安装 unilabos | ~2 GB |
| **unilabos-full** | 仿真可视化、完整功能体验 | unilabos + 完整 ROS2 桌面版 + Gazebo + MoveIt | ~8-10 GB |
## 安装方式选择
根据您的使用场景,选择合适的安装方式:
| 安装方式 | 适用人群 | 推荐安装包 | 特点 | 安装时间 |
| ---------------------- | -------------------- | ----------------- | ------------------------------ | ---------------------------- |
| **方式一:一键安装** | 快速体验、演示 | 预打包环境 | 离线可用,无需配置 | 5-10 分钟 (网络良好的情况下) |
| **方式二:手动安装** | **大多数用户** | `unilabos` | 完整功能,开箱即用 | 10-20 分钟 |
| **方式三:开发者安装** | 开发者、需要修改源码 | `unilabos-env` | 可编辑模式,支持自定义开发 | 20-30 分钟 |
| **仿真/可视化** | 仿真测试、可视化调试 | `unilabos-full` | 含 Gazebo、rviz2、MoveIt | 30-60 分钟 |
| 安装方式 | 适用人群 | 特点 | 安装时间 |
| ---------------------- | -------------------- | ------------------------------ | ---------------------------- |
| **方式一:一键安装** | 实验室用户、快速体验 | 预打包环境,离线可用,无需配置 | 5-10 分钟 (网络良好的情况下) |
| **方式二:手动安装** | 标准用户、生产环境 | 灵活配置,版本可控 | 10-20 分钟 |
| **方式三:开发者安装** | 开发者、需要修改源码 | 可编辑模式,支持自定义 msgs | 20-30 分钟 |
---
@@ -155,38 +144,17 @@ bash Miniforge3-$(uname)-$(uname -m).sh
使用以下命令创建 Uni-Lab 专用环境:
```bash
mamba create -n unilab python=3.11.14 # 目前ros2组件依赖版本大多为3.11.14
mamba create -n unilab python=3.11.11 # 目前ros2组件依赖版本大多为3.11.11
mamba activate unilab
# 选择安装包(三选一):
# 方案 A标准安装推荐大多数用户
mamba install uni-lab::unilabos -c robostack-staging -c conda-forge
# 方案 B开发者环境可编辑模式开发
mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge
# 然后安装 unilabos 和 pip 依赖:
git clone https://github.com/deepmodeling/Uni-Lab-OS.git && cd Uni-Lab-OS
pip install -e .
uv pip install -r unilabos/utils/requirements.txt
# 方案 C完整版含仿真和可视化工具
mamba install uni-lab::unilabos-full -c robostack-staging -c conda-forge
mamba install -n unilab uni-lab::unilabos -c robostack-staging -c conda-forge
```
**参数说明**:
- `-n unilab`: 创建名为 "unilab" 的环境
- `uni-lab::unilabos`: 安装 unilabos 完整包,开箱即用(推荐)
- `uni-lab::unilabos-env`: 仅安装环境依赖,适合开发者使用 `pip install -e .`
- `uni-lab::unilabos-full`: 安装完整包(含 ROS2 Desktop、Gazebo、MoveIt 等)
- `uni-lab::unilabos`: 从 uni-lab channel 安装 unilabos 包
- `-c robostack-staging -c conda-forge`: 添加额外的软件源
**包选择建议**
- **日常使用/生产部署**:安装 `unilabos`(推荐,完整功能,开箱即用)
- **开发者**:安装 `unilabos-env`,然后使用 `uv pip install -r unilabos/utils/requirements.txt` 安装依赖,再 `pip install -e .` 进行可编辑安装
- **仿真/可视化**:安装 `unilabos-full`Gazebo、rviz2、MoveIt
**如果遇到网络问题**,可以使用清华镜像源加速下载:
```bash
@@ -195,14 +163,8 @@ mamba config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/m
mamba config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/
mamba config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge/
# 然后重新执行安装命令(推荐标准安装)
# 然后重新执行安装命令
mamba create -n unilab uni-lab::unilabos -c robostack-staging
# 或完整版(仿真/可视化)
mamba create -n unilab uni-lab::unilabos-full -c robostack-staging
# pip 安装时使用清华镜像(开发者安装时使用)
uv pip install -r unilabos/utils/requirements.txt -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
```
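The per-command channel flags above can also be made persistent. The snippet below is only a sketch, not part of the official guide: it writes the same Tsinghua mirrors into `~/.condarc` and points pip at the mirror once, so later `mamba`/`pip` calls pick them up automatically.

```bash
# Persist the mirror configuration instead of repeating -c / -i flags
# (sketch; merge by hand if ~/.condarc already exists).
cat >> ~/.condarc <<'EOF'
channels:
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge/
  - https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/
EOF
pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
```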
### 第三步:激活环境
@@ -241,87 +203,58 @@ cd Uni-Lab-OS
cd Uni-Lab-OS
```
### 第二步:安装开发环境unilabos-env
### 第二步:安装基础环境
**重要**:开发者请使用 `unilabos-env` 包,它专为开发者设计:
- 包含 ROS2 核心组件和消息包ros-humble-ros-core、std-msgs、geometry-msgs 等)
- 包含 transforms3d、cv-bridge、tf2 等 conda 依赖
- 包含 `uv` 工具,用于快速安装 pip 依赖
- **不包含** pip 依赖和 unilabos 包(由 `pip install -e .` 和 `uv pip install` 安装)
**推荐方式**:先通过**方式一(一键安装)**或**方式二(手动安装)**完成基础环境的安装,这将包含所有必需的依赖项(ROS2、msgs 等)。
#### 选项 A通过一键安装推荐
参考上文"方式一:一键安装",完成基础环境的安装后,激活环境:
```bash
# 创建并激活环境
mamba create -n unilab python=3.11.14
conda activate unilab
# 安装开发者环境包ROS2 + conda 依赖 + uv
mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge
```
### 第三步:安装 pip 依赖和可编辑模式安装
#### 选项 B通过手动安装
克隆代码并安装依赖
参考上文"方式二:手动安装",创建并安装环境
```bash
mamba create -n unilab python=3.11.11
conda activate unilab
mamba install -n unilab uni-lab::unilabos -c robostack-staging -c conda-forge
```
**说明**:这会安装包括 Python 3.11.11、ROS2 Humble、ros-humble-unilabos-msgs 和所有必需依赖
### 第三步:切换到开发版本
现在你已经有了一个完整可用的 Uni-Lab 环境,接下来将 unilabos 包切换为开发版本:
```bash
# 确保环境已激活
conda activate unilab
# 克隆仓库(如果还未克隆
git clone https://github.com/deepmodeling/Uni-Lab-OS.git
cd Uni-Lab-OS
# 卸载 pip 安装的 unilabos保留所有 conda 依赖
pip uninstall unilabos -y
# 切换到 dev 分支(可选
# 克隆 dev 分支(如果还未克隆
cd /path/to/your/workspace
git clone -b dev https://github.com/deepmodeling/Uni-Lab-OS.git
# 或者如果已经克隆,切换到 dev 分支
cd Uni-Lab-OS
git checkout dev
git pull
```
**推荐:使用安装脚本**(自动检测中文环境,使用 uv 加速):
```bash
# 自动检测中文环境,如果是中文系统则使用清华镜像
python scripts/dev_install.py
# 或者手动指定:
python scripts/dev_install.py --china # 强制使用清华镜像
python scripts/dev_install.py --no-mirror # 强制使用 PyPI
python scripts/dev_install.py --skip-deps # 跳过 pip 依赖安装
python scripts/dev_install.py --use-pip # 使用 pip 而非 uv
```
**手动安装**(如果脚本安装失败或速度太慢):
```bash
# 1. 安装 unilabos可编辑模式
pip install -e .
# 2. 使用 uv 安装 pip 依赖(推荐,速度更快)
uv pip install -r unilabos/utils/requirements.txt
# 国内用户使用清华镜像:
# 以可编辑模式安装开发版 unilabos
pip install -e . -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
uv pip install -r unilabos/utils/requirements.txt -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
```
**注意**
- `uv` 已包含在 `unilabos-env` 中,无需单独安装
- `unilabos/utils/requirements.txt` 包含运行 unilabos 所需的所有 pip 依赖
- 部分特殊包(如 pylabrobot会在运行时由 unilabos 自动检测并安装
**参数说明**
**为什么使用可编辑模式?**
- `-e` (editable mode):代码修改**立即生效**,无需重新安装
- 适合开发调试:修改代码后直接运行测试
- 与 `unilabos-env` 配合:环境依赖由 conda 管理unilabos 代码由 pip 管理
**验证安装**
```bash
# 检查 unilabos 版本
python -c "import unilabos; print(unilabos.__version__)"
# 检查安装位置(应该指向你的代码目录)
pip show unilabos | grep Location
```
- `-e`: editable mode可编辑模式代码修改立即生效无需重新安装
- `-i`: 使用清华镜像源加速下载
- `pip uninstall unilabos`: 只卸载 pip 安装的 unilabos 包,不影响 conda 安装的其他依赖(如 ROS2、msgs 等)
### 第四步:安装或自定义 ros-humble-unilabos-msgs可选
@@ -531,45 +464,7 @@ cd $CONDA_PREFIX/envs/unilab
### 问题 8: 环境很大,有办法减小吗?
**解决方案**:
1. **使用 `unilabos` 标准版**(推荐大多数用户):
```bash
mamba install uni-lab::unilabos -c robostack-staging -c conda-forge
```
标准版包含完整功能,环境大小约 2-3GB(相比完整版的 8-10GB)。
2. **使用 `unilabos-env` 开发者版**(最小化):
```bash
mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge
# 然后手动安装依赖
pip install -e .
uv pip install -r unilabos/utils/requirements.txt
```
开发者版只包含环境依赖,体积最小约 2GB。
3. **按需安装额外组件**
如果后续需要特定功能,可以单独安装:
```bash
# 需要 Jupyter
mamba install jupyter jupyros
# 需要可视化
mamba install matplotlib opencv
# 需要仿真(注意:这会安装大量依赖)
mamba install ros-humble-gazebo-ros
```
4. **预打包环境问题**
预打包环境(方式一)包含所有依赖,通常较大(压缩后 2-5GB)。这是为了确保离线安装和完整功能。
**包选择建议**
| 需求 | 推荐包 | 预估大小 |
|------|--------|----------|
| 日常使用/生产部署 | `unilabos` | ~2-3 GB |
| 开发调试(可编辑模式) | `unilabos-env` | ~2 GB |
| 仿真/可视化 | `unilabos-full` | ~8-10 GB |
**解决方案**: 预打包的环境包含所有依赖,通常较大(压缩后 2-5GB)。这是为了确保离线安装和完整功能。如果空间有限,考虑使用方式二(手动安装),只安装需要的组件。
### 问题 9: 如何更新到最新版本?
@@ -616,7 +511,6 @@ mamba update ros-humble-unilabos-msgs -c uni-lab -c robostack-staging -c conda-f
**提示**:
- **大多数用户**推荐使用方式二(手动安装)的 `unilabos` 标准版
- **开发者**推荐使用方式三(开发者安装),安装 `unilabos-env` 后使用 `uv pip install -r unilabos/utils/requirements.txt` 安装依赖
- **仿真/可视化**推荐安装 `unilabos-full` 完整版
- **快速体验和演示**推荐使用方式一(一键安装)
- 生产环境推荐使用方式二(手动安装)的稳定版本
- 开发和测试推荐使用方式三(开发者安装)
- 快速体验和演示推荐使用方式一(一键安装)

View File

@@ -1,6 +1,6 @@
package:
name: ros-humble-unilabos-msgs
version: 0.10.17
version: 0.10.15
source:
path: ../../unilabos_msgs
target_directory: src
@@ -25,7 +25,7 @@ requirements:
build:
- ${{ compiler('cxx') }}
- ${{ compiler('c') }}
- python ==3.11.14
- python ==3.11.11
- numpy
- if: build_platform != target_platform
then:
@@ -63,14 +63,14 @@ requirements:
- robostack-staging::ros-humble-rosidl-default-generators
- robostack-staging::ros-humble-std-msgs
- robostack-staging::ros-humble-geometry-msgs
- robostack-staging::ros2-distro-mutex=0.7
- robostack-staging::ros2-distro-mutex=0.6
run:
- robostack-staging::ros-humble-action-msgs
- robostack-staging::ros-humble-ros-workspace
- robostack-staging::ros-humble-rosidl-default-runtime
- robostack-staging::ros-humble-std-msgs
- robostack-staging::ros-humble-geometry-msgs
- robostack-staging::ros2-distro-mutex=0.7
- robostack-staging::ros2-distro-mutex=0.6
- if: osx and x86_64
then:
- __osx >=${{ MACOSX_DEPLOYMENT_TARGET|default('10.14') }}

View File

@@ -1,6 +1,6 @@
package:
name: unilabos
version: "0.10.17"
version: "0.10.15"
source:
path: ../..

View File

@@ -85,7 +85,7 @@ Verification:
-------------
The verify_installation.py script will check:
- Python version (3.11.14)
- Python version (3.11.11)
- ROS2 rclpy installation
- UniLabOS installation and dependencies
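If `verify_installation.py` is not at hand, the same three checks can be approximated by hand; this is a sketch, not the script itself:

```bash
python --version                                          # expect a 3.11.x interpreter
python -c "import rclpy; print('rclpy OK')"               # ROS2 Python client library
python -c "import unilabos; print(unilabos.__version__)"  # UniLabOS and its dependencies
```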
@@ -104,7 +104,7 @@ Build Information:
Branch: {branch}
Platform: {platform}
Python: 3.11.14
Python: 3.11.11
Date: {build_date}
Troubleshooting:

View File

@@ -1,214 +0,0 @@
#!/usr/bin/env python3
"""
Development installation script for UniLabOS.
Auto-detects Chinese locale and uses appropriate mirror.
Usage:
python scripts/dev_install.py
python scripts/dev_install.py --no-mirror # Force no mirror
python scripts/dev_install.py --china # Force China mirror
python scripts/dev_install.py --skip-deps # Skip pip dependencies installation
Flow:
1. pip install -e . (install unilabos in editable mode)
2. Detect Chinese locale
3. Use uv to install pip dependencies from requirements.txt
4. Special packages (like pylabrobot) are handled by environment_check.py at runtime
"""
import locale
import subprocess
import sys
import argparse
from pathlib import Path
# Tsinghua mirror URL
TSINGHUA_MIRROR = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple"
def is_chinese_locale() -> bool:
"""
Detect if system is in Chinese locale.
Same logic as EnvironmentChecker._is_chinese_locale()
"""
try:
lang = locale.getdefaultlocale()[0]
if lang and ("zh" in lang.lower() or "chinese" in lang.lower()):
return True
except Exception:
pass
return False
def run_command(cmd: list, description: str, retry: int = 2) -> bool:
"""Run command with retry support."""
print(f"[INFO] {description}")
print(f"[CMD] {' '.join(cmd)}")
for attempt in range(retry + 1):
try:
result = subprocess.run(cmd, check=True, timeout=600)
print(f"[OK] {description}")
return True
except subprocess.CalledProcessError as e:
if attempt < retry:
print(f"[WARN] Attempt {attempt + 1} failed, retrying...")
else:
print(f"[ERROR] {description} failed: {e}")
return False
except subprocess.TimeoutExpired:
print(f"[ERROR] {description} timed out")
return False
return False
def install_editable(project_root: Path, use_mirror: bool) -> bool:
"""Install unilabos in editable mode using pip."""
cmd = [sys.executable, "-m", "pip", "install", "-e", str(project_root)]
if use_mirror:
cmd.extend(["-i", TSINGHUA_MIRROR])
return run_command(cmd, "Installing unilabos in editable mode")
def install_requirements_uv(requirements_file: Path, use_mirror: bool) -> bool:
"""Install pip dependencies using uv (installed via conda-forge::uv)."""
cmd = ["uv", "pip", "install", "-r", str(requirements_file)]
if use_mirror:
cmd.extend(["-i", TSINGHUA_MIRROR])
return run_command(cmd, "Installing pip dependencies with uv", retry=2)
def install_requirements_pip(requirements_file: Path, use_mirror: bool) -> bool:
"""Fallback: Install pip dependencies using pip."""
cmd = [sys.executable, "-m", "pip", "install", "-r", str(requirements_file)]
if use_mirror:
cmd.extend(["-i", TSINGHUA_MIRROR])
return run_command(cmd, "Installing pip dependencies with pip", retry=2)
def check_uv_available() -> bool:
"""Check if uv is available (installed via conda-forge::uv)."""
try:
subprocess.run(["uv", "--version"], capture_output=True, check=True)
return True
except (subprocess.CalledProcessError, FileNotFoundError):
return False
def main():
parser = argparse.ArgumentParser(description="Development installation script for UniLabOS")
parser.add_argument("--china", action="store_true", help="Force use China mirror (Tsinghua)")
parser.add_argument("--no-mirror", action="store_true", help="Force use default PyPI (no mirror)")
parser.add_argument(
"--skip-deps", action="store_true", help="Skip pip dependencies installation (only install unilabos)"
)
parser.add_argument("--use-pip", action="store_true", help="Use pip instead of uv for dependencies")
args = parser.parse_args()
# Determine project root
script_dir = Path(__file__).parent
project_root = script_dir.parent
requirements_file = project_root / "unilabos" / "utils" / "requirements.txt"
if not (project_root / "setup.py").exists():
print(f"[ERROR] setup.py not found in {project_root}")
sys.exit(1)
print("=" * 60)
print("UniLabOS Development Installation")
print("=" * 60)
print(f"Project root: {project_root}")
print()
# Determine mirror usage based on locale
if args.no_mirror:
use_mirror = False
print("[INFO] Mirror disabled by --no-mirror flag")
elif args.china:
use_mirror = True
print("[INFO] China mirror enabled by --china flag")
else:
use_mirror = is_chinese_locale()
if use_mirror:
print("[INFO] Chinese locale detected, using Tsinghua mirror")
else:
print("[INFO] Non-Chinese locale detected, using default PyPI")
print()
# Step 1: Install unilabos in editable mode
print("[STEP 1] Installing unilabos in editable mode...")
if not install_editable(project_root, use_mirror):
print("[ERROR] Failed to install unilabos")
print()
print("Manual fallback:")
if use_mirror:
print(f" pip install -e {project_root} -i {TSINGHUA_MIRROR}")
else:
print(f" pip install -e {project_root}")
sys.exit(1)
print()
# Step 2: Install pip dependencies
if args.skip_deps:
print("[INFO] Skipping pip dependencies installation (--skip-deps)")
else:
print("[STEP 2] Installing pip dependencies...")
if not requirements_file.exists():
print(f"[WARN] Requirements file not found: {requirements_file}")
print("[INFO] Skipping dependencies installation")
else:
# Try uv first (faster), fallback to pip
if args.use_pip:
print("[INFO] Using pip (--use-pip flag)")
success = install_requirements_pip(requirements_file, use_mirror)
elif check_uv_available():
print("[INFO] Using uv (installed via conda-forge::uv)")
success = install_requirements_uv(requirements_file, use_mirror)
if not success:
print("[WARN] uv failed, falling back to pip...")
success = install_requirements_pip(requirements_file, use_mirror)
else:
print("[WARN] uv not available (should be installed via: mamba install conda-forge::uv)")
print("[INFO] Falling back to pip...")
success = install_requirements_pip(requirements_file, use_mirror)
if not success:
print()
print("[WARN] Failed to install some dependencies automatically.")
print("You can manually install them:")
if use_mirror:
print(f" uv pip install -r {requirements_file} -i {TSINGHUA_MIRROR}")
print(" or:")
print(f" pip install -r {requirements_file} -i {TSINGHUA_MIRROR}")
else:
print(f" uv pip install -r {requirements_file}")
print(" or:")
print(f" pip install -r {requirements_file}")
print()
print("=" * 60)
print("Installation complete!")
print("=" * 60)
print()
print("Note: Some special packages (like pylabrobot) are installed")
print("automatically at runtime by unilabos if needed.")
print()
print("Verify installation:")
print(' python -c "import unilabos; print(unilabos.__version__)"')
print()
print("If you encounter issues, you can manually install dependencies:")
if use_mirror:
print(f" uv pip install -r unilabos/utils/requirements.txt -i {TSINGHUA_MIRROR}")
else:
print(" uv pip install -r unilabos/utils/requirements.txt")
print()
if __name__ == "__main__":
main()

View File

@@ -4,7 +4,7 @@ package_name = 'unilabos'
setup(
name=package_name,
version='0.10.17',
version='0.10.15',
packages=find_packages(),
include_package_data=True,
install_requires=['setuptools'],

View File

@@ -1,213 +0,0 @@
{
"workflow": [
{
"action": "transfer_liquid",
"action_args": {
"sources": "cell_lines",
"targets": "Liquid_1",
"asp_vol": 100.0,
"dis_vol": 74.75,
"asp_flow_rate": 94.0,
"dis_flow_rate": 95.5
}
},
{
"action": "transfer_liquid",
"action_args": {
"sources": "cell_lines",
"targets": "Liquid_2",
"asp_vol": 100.0,
"dis_vol": 74.75,
"asp_flow_rate": 94.0,
"dis_flow_rate": 95.5
}
},
{
"action": "transfer_liquid",
"action_args": {
"sources": "cell_lines",
"targets": "Liquid_3",
"asp_vol": 100.0,
"dis_vol": 74.75,
"asp_flow_rate": 94.0,
"dis_flow_rate": 95.5
}
},
{
"action": "transfer_liquid",
"action_args": {
"sources": "cell_lines_2",
"targets": "Liquid_4",
"asp_vol": 100.0,
"dis_vol": 74.75,
"asp_flow_rate": 94.0,
"dis_flow_rate": 95.5
}
},
{
"action": "transfer_liquid",
"action_args": {
"sources": "cell_lines_2",
"targets": "Liquid_5",
"asp_vol": 100.0,
"dis_vol": 74.75,
"asp_flow_rate": 94.0,
"dis_flow_rate": 95.5
}
},
{
"action": "transfer_liquid",
"action_args": {
"sources": "cell_lines_2",
"targets": "Liquid_6",
"asp_vol": 100.0,
"dis_vol": 74.75,
"asp_flow_rate": 94.0,
"dis_flow_rate": 95.5
}
},
{
"action": "transfer_liquid",
"action_args": {
"sources": "cell_lines_3",
"targets": "dest_set",
"asp_vol": 100.0,
"dis_vol": 74.75,
"asp_flow_rate": 94.0,
"dis_flow_rate": 95.5
}
},
{
"action": "transfer_liquid",
"action_args": {
"sources": "cell_lines_3",
"targets": "dest_set_2",
"asp_vol": 100.0,
"dis_vol": 74.75,
"asp_flow_rate": 94.0,
"dis_flow_rate": 95.5
}
},
{
"action": "transfer_liquid",
"action_args": {
"sources": "cell_lines_3",
"targets": "dest_set_3",
"asp_vol": 100.0,
"dis_vol": 74.75,
"asp_flow_rate": 94.0,
"dis_flow_rate": 95.5
}
}
],
"reagent": {
"Liquid_1": {
"slot": 1,
"well": [
"A4",
"A7",
"A10"
],
"labware": "rep 1"
},
"Liquid_4": {
"slot": 1,
"well": [
"A4",
"A7",
"A10"
],
"labware": "rep 1"
},
"dest_set": {
"slot": 1,
"well": [
"A4",
"A7",
"A10"
],
"labware": "rep 1"
},
"Liquid_2": {
"slot": 2,
"well": [
"A3",
"A5",
"A8"
],
"labware": "rep 2"
},
"Liquid_5": {
"slot": 2,
"well": [
"A3",
"A5",
"A8"
],
"labware": "rep 2"
},
"dest_set_2": {
"slot": 2,
"well": [
"A3",
"A5",
"A8"
],
"labware": "rep 2"
},
"Liquid_3": {
"slot": 3,
"well": [
"A4",
"A6",
"A10"
],
"labware": "rep 3"
},
"Liquid_6": {
"slot": 3,
"well": [
"A4",
"A6",
"A10"
],
"labware": "rep 3"
},
"dest_set_3": {
"slot": 3,
"well": [
"A4",
"A6",
"A10"
],
"labware": "rep 3"
},
"cell_lines": {
"slot": 4,
"well": [
"A1",
"A3",
"A5"
],
"labware": "DRUG + YOYO-MEDIA"
},
"cell_lines_2": {
"slot": 4,
"well": [
"A1",
"A3",
"A5"
],
"labware": "DRUG + YOYO-MEDIA"
},
"cell_lines_3": {
"slot": 4,
"well": [
"A1",
"A3",
"A5"
],
"labware": "DRUG + YOYO-MEDIA"
}
}
}

View File

@@ -1 +1 @@
__version__ = "0.10.17"
__version__ = "0.10.15"

View File

@@ -1,6 +0,0 @@
"""Entry point for `python -m unilabos`."""
from unilabos.app.main import main
if __name__ == "__main__":
main()
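With this module entry point in place, the package can be launched through the interpreter as well as the `unilab` script; a minimal invocation (assuming the package is installed in the active environment) would be:

```bash
python -m unilabos --help   # same argparse CLI as the `unilab` entry point
```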

View File

@@ -7,6 +7,7 @@ import sys
import threading
import time
from typing import Dict, Any, List
import networkx as nx
import yaml
@@ -16,9 +17,9 @@ unilabos_dir = os.path.dirname(os.path.dirname(current_dir))
if unilabos_dir not in sys.path:
sys.path.append(unilabos_dir)
from unilabos.app.utils import cleanup_for_restart
from unilabos.utils.banner_print import print_status, print_unilab_banner
from unilabos.config.config import load_config, BasicConfig, HTTPConfig
from unilabos.app.utils import cleanup_for_restart
# Global restart flags (used by ws_client and web/server)
_restart_requested: bool = False
@@ -160,12 +161,6 @@ def parse_args():
default=False,
help="Complete registry information",
)
parser.add_argument(
"--check_mode",
action="store_true",
default=False,
help="Run in check mode for CI: validates registry imports and ensures no file changes",
)
parser.add_argument(
"--no_update_feedback",
action="store_true",
@@ -216,10 +211,7 @@ def main():
args_dict = vars(args)
# 环境检查 - 检查并自动安装必需的包 (可选)
skip_env_check = args_dict.get("skip_env_check", False)
check_mode = args_dict.get("check_mode", False)
if not skip_env_check:
if not args_dict.get("skip_env_check", False):
from unilabos.utils.environment_check import check_environment
if not check_environment(auto_install=True):
@@ -230,21 +222,7 @@ def main():
# 加载配置文件优先加载config然后从env读取
config_path = args_dict.get("config")
if check_mode:
args_dict["working_dir"] = os.path.abspath(os.getcwd())
# 当 skip_env_check 时,默认使用当前目录作为 working_dir
if skip_env_check and not args_dict.get("working_dir") and not config_path:
working_dir = os.path.abspath(os.getcwd())
print_status(f"跳过环境检查模式:使用当前目录作为工作目录 {working_dir}", "info")
# 检查当前目录是否有 local_config.py
local_config_in_cwd = os.path.join(working_dir, "local_config.py")
if os.path.exists(local_config_in_cwd):
config_path = local_config_in_cwd
print_status(f"发现本地配置文件: {config_path}", "info")
else:
print_status(f"未指定config路径可通过 --config 传入 local_config.py 文件路径", "info")
elif os.getcwd().endswith("unilabos_data"):
if os.getcwd().endswith("unilabos_data"):
working_dir = os.path.abspath(os.getcwd())
else:
working_dir = os.path.abspath(os.path.join(os.getcwd(), "unilabos_data"))
@@ -263,7 +241,7 @@ def main():
working_dir = os.path.dirname(config_path)
elif os.path.exists(working_dir) and os.path.exists(os.path.join(working_dir, "local_config.py")):
config_path = os.path.join(working_dir, "local_config.py")
elif not skip_env_check and not config_path and (
elif not config_path and (
not os.path.exists(working_dir) or not os.path.exists(os.path.join(working_dir, "local_config.py"))
):
print_status(f"未指定config路径可通过 --config 传入 local_config.py 文件路径", "info")
@@ -277,11 +255,9 @@ def main():
print_status(f"已创建 local_config.py 路径: {config_path}", "info")
else:
os._exit(1)
# 加载配置文件 (check_mode 跳过)
# 加载配置文件
print_status(f"当前工作目录为 {working_dir}", "info")
if not check_mode:
load_config_from_file(config_path)
load_config_from_file(config_path)
# 根据配置重新设置日志级别
from unilabos.utils.log import configure_logger, logger
@@ -337,7 +313,6 @@ def main():
machine_name = "".join([c if c.isalnum() or c == "_" else "_" for c in machine_name])
BasicConfig.machine_name = machine_name
BasicConfig.vis_2d_enable = args_dict["2d_vis"]
BasicConfig.check_mode = check_mode
from unilabos.resources.graphio import (
read_node_link_json,
@@ -356,14 +331,10 @@ def main():
# 显示启动横幅
print_unilab_banner(args_dict)
# 注册表 - check_mode 时强制启用 complete_registry
complete_registry = args_dict.get("complete_registry", False) or check_mode
lab_registry = build_registry(args_dict["registry_path"], complete_registry, BasicConfig.upload_registry)
# Check mode: complete_registry 完成后直接退出git diff 检测由 CI workflow 执行
if check_mode:
print_status("Check mode: complete_registry 完成,退出", "info")
os._exit(0)
# 注册表
lab_registry = build_registry(
args_dict["registry_path"], args_dict.get("complete_registry", False), BasicConfig.upload_registry
)
if BasicConfig.upload_registry:
# 设备注册到服务端 - 需要 ak 和 sk
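For context, the `--check_mode` flag removed above was intended for CI: force a `complete_registry` build, exit immediately, and let the workflow fail if any registry file changed. A hypothetical CI step along those lines (flag name as it appeared above; not a documented interface) might look like:

```bash
# Hypothetical registry consistency check for CI (sketch based on the removed --check_mode flag).
unilab --check_mode
git diff --exit-code || { echo "Registry files changed after complete_registry run"; exit 1; }
```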

View File

@@ -4,40 +4,8 @@ UniLabOS 应用工具函数
提供清理、重启等工具函数
"""
import glob
import os
import shutil
import sys
def patch_rclpy_dll_windows():
"""在 Windows + conda 环境下为 rclpy 打 DLL 加载补丁"""
if sys.platform != "win32" or not os.environ.get("CONDA_PREFIX"):
return
try:
import rclpy
return
except ImportError as e:
if not str(e).startswith("DLL load failed"):
return
cp = os.environ["CONDA_PREFIX"]
impl = os.path.join(cp, "Lib", "site-packages", "rclpy", "impl", "implementation_singleton.py")
pyd = glob.glob(os.path.join(cp, "Lib", "site-packages", "rclpy", "_rclpy_pybind11*.pyd"))
if not os.path.exists(impl) or not pyd:
return
with open(impl, "r", encoding="utf-8") as f:
content = f.read()
lib_bin = os.path.join(cp, "Library", "bin").replace("\\", "/")
patch = f'# UniLabOS DLL Patch\nimport os,ctypes\nos.add_dll_directory("{lib_bin}") if hasattr(os,"add_dll_directory") else None\ntry: ctypes.CDLL("{pyd[0].replace(chr(92),"/")}")\nexcept: pass\n# End Patch\n'
shutil.copy2(impl, impl + ".bak")
with open(impl, "w", encoding="utf-8") as f:
f.write(patch + content)
patch_rclpy_dll_windows()
import gc
import os
import threading
import time
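The removed `patch_rclpy_dll_windows()` helper targeted the common Windows-under-conda failure where importing `rclpy` raises "DLL load failed". A quick way to see whether that workaround is still needed on a given machine (a sketch):

```bash
# Run from the activated conda environment on Windows.
python -c "import rclpy; print('rclpy imports cleanly')"
# If this fails with "DLL load failed", %CONDA_PREFIX%\Library\bin is likely missing from the DLL search path.
```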

View File

@@ -359,7 +359,9 @@ class HTTPClient:
Returns:
Dict: API响应数据包含 code 和 data (uuid, name)
"""
# target_lab_uuid 暂时使用默认值,后续由后端根据 ak/sk 获取
payload = {
"target_lab_uuid": "28c38bb0-63f6-4352-b0d8-b5b8eb1766d5",
"name": name,
"data": {
"workflow_uuid": workflow_uuid,

View File

@@ -58,14 +58,14 @@ class JobResultStore:
feedback=feedback or {},
timestamp=time.time(),
)
logger.trace(f"[JobResultStore] Stored result for job {job_id[:8]}, status={status}")
logger.debug(f"[JobResultStore] Stored result for job {job_id[:8]}, status={status}")
def get_and_remove(self, job_id: str) -> Optional[JobResult]:
"""获取并删除任务结果"""
with self._results_lock:
result = self._results.pop(job_id, None)
if result:
logger.trace(f"[JobResultStore] Retrieved and removed result for job {job_id[:8]}")
logger.debug(f"[JobResultStore] Retrieved and removed result for job {job_id[:8]}")
return result
def get_result(self, job_id: str) -> Optional[JobResult]:

View File

@@ -23,7 +23,7 @@ from typing import Optional, Dict, Any, List
from urllib.parse import urlparse
from enum import Enum
from typing_extensions import TypedDict
from jedi.inference.gradual.typing import TypedDict
from unilabos.app.model import JobAddReq
from unilabos.ros.nodes.presets.host_node import HostNode
@@ -154,7 +154,7 @@ class DeviceActionManager:
job_info.set_ready_timeout(10) # 设置10秒超时
self.active_jobs[device_key] = job_info
job_log = format_job_log(job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name)
logger.trace(f"[DeviceActionManager] Job {job_log} can start immediately for {device_key}")
logger.info(f"[DeviceActionManager] Job {job_log} can start immediately for {device_key}")
return True
def start_job(self, job_id: str) -> bool:
@@ -210,9 +210,8 @@ class DeviceActionManager:
job_info.update_timestamp()
# 从all_jobs中移除已结束的job
del self.all_jobs[job_id]
# job_log = format_job_log(job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name)
# logger.debug(f"[DeviceActionManager] Job {job_log} ended for {device_key}")
pass
job_log = format_job_log(job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name)
logger.info(f"[DeviceActionManager] Job {job_log} ended for {device_key}")
else:
job_log = format_job_log(job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name)
logger.warning(f"[DeviceActionManager] Job {job_log} was not active for {device_key}")
@@ -228,7 +227,7 @@ class DeviceActionManager:
next_job_log = format_job_log(
next_job.job_id, next_job.task_id, next_job.device_id, next_job.action_name
)
logger.trace(f"[DeviceActionManager] Next job {next_job_log} can start for {device_key}")
logger.info(f"[DeviceActionManager] Next job {next_job_log} can start for {device_key}")
return next_job
return None
@@ -269,7 +268,7 @@ class DeviceActionManager:
# 从all_jobs中移除
del self.all_jobs[job_id]
job_log = format_job_log(job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name)
logger.trace(f"[DeviceActionManager] Active job {job_log} cancelled for {device_key}")
logger.info(f"[DeviceActionManager] Active job {job_log} cancelled for {device_key}")
# 启动下一个任务
if device_key in self.device_queues and self.device_queues[device_key]:
@@ -282,7 +281,7 @@ class DeviceActionManager:
next_job_log = format_job_log(
next_job.job_id, next_job.task_id, next_job.device_id, next_job.action_name
)
logger.trace(f"[DeviceActionManager] Next job {next_job_log} can start after cancel")
logger.info(f"[DeviceActionManager] Next job {next_job_log} can start after cancel")
return True
# 如果是排队中的任务
@@ -296,7 +295,7 @@ class DeviceActionManager:
job_log = format_job_log(
job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name
)
logger.trace(f"[DeviceActionManager] Queued job {job_log} cancelled for {device_key}")
logger.info(f"[DeviceActionManager] Queued job {job_log} cancelled for {device_key}")
return True
job_log = format_job_log(job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name)
@@ -495,12 +494,8 @@ class MessageProcessor:
await self._process_message(message_type, message_data)
else:
if message_type.endswith("_material"):
logger.trace(
f"[MessageProcessor] 收到一条归属 {data.get('edge_session')} 的旧消息{data}"
)
logger.debug(
f"[MessageProcessor] 跳过了一条归属 {data.get('edge_session')} 的旧消息: {data.get('action')}"
)
logger.trace(f"[MessageProcessor] 收到一条归属 {data.get('edge_session')} 的旧消息:{data}")
logger.debug(f"[MessageProcessor] 跳过了一条归属 {data.get('edge_session')} 的旧消息: {data.get('action')}")
else:
await self._process_message(message_type, message_data)
except json.JSONDecodeError:
@@ -570,7 +565,7 @@ class MessageProcessor:
async def _process_message(self, message_type: str, message_data: Dict[str, Any]):
"""处理收到的消息"""
logger.trace(f"[MessageProcessor] Processing message: {message_type}")
logger.debug(f"[MessageProcessor] Processing message: {message_type}")
try:
if message_type == "pong":
@@ -642,13 +637,13 @@ class MessageProcessor:
await self._send_action_state_response(
device_id, action_name, task_id, job_id, "query_action_status", True, 0
)
logger.trace(f"[MessageProcessor] Job {job_log} can start immediately")
logger.info(f"[MessageProcessor] Job {job_log} can start immediately")
else:
# 需要排队
await self._send_action_state_response(
device_id, action_name, task_id, job_id, "query_action_status", False, 10
)
logger.trace(f"[MessageProcessor] Job {job_log} queued")
logger.info(f"[MessageProcessor] Job {job_log} queued")
# 通知QueueProcessor有新的队列更新
if self.queue_processor:
@@ -852,7 +847,9 @@ class MessageProcessor:
device_action_groups[key_add] = []
device_action_groups[key_add].append(item["uuid"])
logger.info(f"[资源同步] 跨站Transfer: {item['uuid'][:8]} from {device_old_id} to {device_id}")
logger.info(
f"[资源同步] 跨站Transfer: {item['uuid'][:8]} from {device_old_id} to {device_id}"
)
else:
# 正常update
key = (device_id, "update")
@@ -866,9 +863,7 @@ class MessageProcessor:
device_action_groups[key] = []
device_action_groups[key].append(item["uuid"])
logger.trace(
f"[资源同步] 动作 {action} 分组数量: {len(device_action_groups)}, 总数量: {len(resource_uuid_list)}"
)
logger.trace(f"[资源同步] 动作 {action} 分组数量: {len(device_action_groups)}, 总数量: {len(resource_uuid_list)}")
# 为每个(device_id, action)创建独立的更新线程
for (device_id, actual_action), items in device_action_groups.items():
@@ -916,13 +911,13 @@ class MessageProcessor:
# 发送确认消息
if self.websocket_client:
await self.websocket_client.send_message(
{"action": "restart_acknowledged", "data": {"reason": reason, "delay": delay}}
)
await self.websocket_client.send_message({
"action": "restart_acknowledged",
"data": {"reason": reason, "delay": delay}
})
# 设置全局重启标志
import unilabos.app.main as main_module
main_module._restart_requested = True
main_module._restart_reason = reason
@@ -932,12 +927,10 @@ class MessageProcessor:
# 在新线程中执行清理,避免阻塞当前事件循环
def do_cleanup():
import time
time.sleep(0.5) # 给当前消息处理完成的时间
logger.info(f"[MessageProcessor] Starting cleanup for restart, reason: {reason}")
try:
from unilabos.app.utils import cleanup_for_restart
if cleanup_for_restart():
logger.info("[MessageProcessor] Cleanup successful, main() will restart")
else:
@@ -1135,7 +1128,7 @@ class QueueProcessor:
success = self.message_processor.send_message(message)
job_log = format_job_log(job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name)
if success:
logger.trace(f"[QueueProcessor] Sent busy/need_more for queued job {job_log}")
logger.debug(f"[QueueProcessor] Sent busy/need_more for queued job {job_log}")
else:
logger.warning(f"[QueueProcessor] Failed to send busy status for job {job_log}")
@@ -1158,7 +1151,7 @@ class QueueProcessor:
job_info.action_name,
)
logger.trace(f"[QueueProcessor] Job {job_log} completed with status: {status}")
logger.info(f"[QueueProcessor] Job {job_log} completed with status: {status}")
# 结束任务,获取下一个可执行的任务
next_job = self.device_manager.end_job(job_id)
@@ -1178,8 +1171,8 @@ class QueueProcessor:
},
}
self.message_processor.send_message(message)
# next_job_log = format_job_log(next_job.job_id, next_job.task_id, next_job.device_id, next_job.action_name)
# logger.debug(f"[QueueProcessor] Notified next job {next_job_log} can start")
next_job_log = format_job_log(next_job.job_id, next_job.task_id, next_job.device_id, next_job.action_name)
logger.info(f"[QueueProcessor] Notified next job {next_job_log} can start")
# 立即触发下一轮状态检查
self.notify_queue_update()
@@ -1321,7 +1314,7 @@ class WebSocketClient(BaseCommunicationClient):
except (KeyError, AttributeError):
logger.warning(f"[WebSocketClient] Failed to remove job {item.job_id} from HostNode status")
# logger.debug(f"[WebSocketClient] Intercepting final status for job_id: {item.job_id} - {status}")
logger.info(f"[WebSocketClient] Intercepting final status for job_id: {item.job_id} - {status}")
# 通知队列处理器job完成包括timeout的job
self.queue_processor.handle_job_completed(item.job_id, status)
@@ -1388,9 +1381,7 @@ class WebSocketClient(BaseCommunicationClient):
if host_node:
# 获取设备信息
for device_id, namespace in host_node.devices_names.items():
device_key = (
f"{namespace}/{device_id}" if namespace.startswith("/") else f"/{namespace}/{device_id}"
)
device_key = f"{namespace}/{device_id}" if namespace.startswith("/") else f"/{namespace}/{device_id}"
is_online = device_key in host_node._online_devices
# 获取设备的动作信息
@@ -1404,16 +1395,14 @@ class WebSocketClient(BaseCommunicationClient):
"action_type": str(type(client).__name__),
}
devices.append(
{
"device_id": device_id,
"namespace": namespace,
"device_key": device_key,
"is_online": is_online,
"machine_name": host_node.device_machine_names.get(device_id, machine_name),
"actions": actions,
}
)
devices.append({
"device_id": device_id,
"namespace": namespace,
"device_key": device_key,
"is_online": is_online,
"machine_name": host_node.device_machine_names.get(device_id, machine_name),
"actions": actions,
})
logger.info(f"[WebSocketClient] Collected {len(devices)} devices for host_ready")
except Exception as e:

View File

@@ -22,7 +22,6 @@ class BasicConfig:
startup_json_path = None # 填写绝对路径
disable_browser = False # 禁止浏览器自动打开
port = 8002 # 本地HTTP服务
check_mode = False # CI 检查模式,用于验证 registry 导入和文件一致性
# 'TRACE', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'
log_level: Literal["TRACE", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] = "DEBUG"

File diff suppressed because it is too large.

View File

@@ -30,30 +30,9 @@ from pylabrobot.liquid_handling.standard import (
ResourceMove,
ResourceDrop,
)
from pylabrobot.resources import (
ResourceHolder,
ResourceStack,
Tip,
Deck,
Plate,
Well,
TipRack,
Resource,
Container,
Coordinate,
TipSpot,
Trash,
PlateAdapter,
TubeRack,
)
from pylabrobot.resources import ResourceHolder, ResourceStack, Tip, Deck, Plate, Well, TipRack, Resource, Container, Coordinate, TipSpot, Trash, PlateAdapter, TubeRack
from unilabos.devices.liquid_handling.liquid_handler_abstract import (
LiquidHandlerAbstract,
SimpleReturn,
SetLiquidReturn,
SetLiquidFromPlateReturn,
)
from unilabos.registry.placeholder_type import ResourceSlot
from unilabos.devices.liquid_handling.liquid_handler_abstract import LiquidHandlerAbstract, SimpleReturn
from unilabos.ros.nodes.base_device_node import BaseROS2DeviceNode
@@ -101,7 +80,6 @@ class PRCXI9300Deck(Deck):
self.slots[slot - 1] = resource
super().assign_child_resource(resource, location=self.slot_locations[slot - 1])
class PRCXI9300Container(Plate):
"""PRCXI 9300 的专用 Container 类,继承自 Plate用于槽位定位和未知模块。
@@ -130,29 +108,20 @@ class PRCXI9300Container(Plate):
def serialize_state(self) -> Dict[str, Dict[str, Any]]:
data = super().serialize_state()
data.update(self._unilabos_state)
return data
return data
class PRCXI9300Plate(Plate):
"""
"""
专用孔板类:
1. 继承自 PLR 原生 Plate保留所有物理特性。
2. 增加 material_info 参数,用于在初始化时直接绑定 Unilab UUID。
"""
def __init__(
self,
name: str,
size_x: float,
size_y: float,
size_z: float,
category: str = "plate",
ordered_items: collections.OrderedDict = None,
ordering: Optional[collections.OrderedDict] = None,
model: Optional[str] = None,
material_info: Optional[Dict[str, Any]] = None,
**kwargs,
):
def __init__(self, name: str, size_x: float, size_y: float, size_z: float,
category: str = "plate",
ordered_items: collections.OrderedDict = None,
ordering: Optional[collections.OrderedDict] = None,
model: Optional[str] = None,
material_info: Optional[Dict[str, Any]] = None,
**kwargs):
# 如果 ordered_items 不为 None直接使用
if ordered_items is not None:
items = ordered_items
@@ -173,34 +142,40 @@ class PRCXI9300Plate(Plate):
else:
items = None
ordering_param = None
# 根据情况传递不同的参数
if items is not None:
super().__init__(
name, size_x, size_y, size_z, ordered_items=items, category=category, model=model, **kwargs
)
super().__init__(name, size_x, size_y, size_z,
ordered_items=items,
category=category,
model=model, **kwargs)
elif ordering_param is not None:
# 传递 ordering 参数,让 Plate 自己创建 Well 对象
super().__init__(
name, size_x, size_y, size_z, ordering=ordering_param, category=category, model=model, **kwargs
)
super().__init__(name, size_x, size_y, size_z,
ordering=ordering_param,
category=category,
model=model, **kwargs)
else:
super().__init__(name, size_x, size_y, size_z, category=category, model=model, **kwargs)
super().__init__(name, size_x, size_y, size_z,
category=category,
model=model, **kwargs)
self._unilabos_state = {}
if material_info:
self._unilabos_state["Material"] = material_info
def load_state(self, state: Dict[str, Any]) -> None:
super().load_state(state)
self._unilabos_state = state
def serialize_state(self) -> Dict[str, Dict[str, Any]]:
try:
data = super().serialize_state()
except AttributeError:
data = {}
if hasattr(self, "_unilabos_state") and self._unilabos_state:
if hasattr(self, '_unilabos_state') and self._unilabos_state:
safe_state = {}
for k, v in self._unilabos_state.items():
# 如果是 Material 字典,深入检查
@@ -213,32 +188,23 @@ class PRCXI9300Plate(Plate):
else:
# 打印日志提醒(可选)
# print(f"Warning: Removing non-serializable key {mk} from {self.name}")
pass
pass
safe_state[k] = safe_material
# 其他顶层属性也进行类型检查
elif isinstance(v, (str, int, float, bool, list, dict, type(None))):
safe_state[k] = v
data.update(safe_state)
return data # 其他顶层属性也进行类型检查
return data # 其他顶层属性也进行类型检查
class PRCXI9300TipRack(TipRack):
"""专用吸头盒类"""
def __init__(
self,
name: str,
size_x: float,
size_y: float,
size_z: float,
category: str = "tip_rack",
ordered_items: collections.OrderedDict = None,
ordering: Optional[collections.OrderedDict] = None,
model: Optional[str] = None,
material_info: Optional[Dict[str, Any]] = None,
**kwargs,
):
""" 专用吸头盒类 """
def __init__(self, name: str, size_x: float, size_y: float, size_z: float,
category: str = "tip_rack",
ordered_items: collections.OrderedDict = None,
ordering: Optional[collections.OrderedDict] = None,
model: Optional[str] = None,
material_info: Optional[Dict[str, Any]] = None,
**kwargs):
# 如果 ordered_items 不为 None直接使用
if ordered_items is not None:
items = ordered_items
@@ -259,23 +225,27 @@ class PRCXI9300TipRack(TipRack):
else:
items = None
ordering_param = None
# 根据情况传递不同的参数
if items is not None:
super().__init__(
name, size_x, size_y, size_z, ordered_items=items, category=category, model=model, **kwargs
)
super().__init__(name, size_x, size_y, size_z,
ordered_items=items,
category=category,
model=model, **kwargs)
elif ordering_param is not None:
# 传递 ordering 参数,让 TipRack 自己创建 Tip 对象
super().__init__(
name, size_x, size_y, size_z, ordering=ordering_param, category=category, model=model, **kwargs
)
super().__init__(name, size_x, size_y, size_z,
ordering=ordering_param,
category=category,
model=model, **kwargs)
else:
super().__init__(name, size_x, size_y, size_z, category=category, model=model, **kwargs)
super().__init__(name, size_x, size_y, size_z,
category=category,
model=model, **kwargs)
self._unilabos_state = {}
if material_info:
self._unilabos_state["Material"] = material_info
def load_state(self, state: Dict[str, Any]) -> None:
super().load_state(state)
self._unilabos_state = state
@@ -285,7 +255,7 @@ class PRCXI9300TipRack(TipRack):
data = super().serialize_state()
except AttributeError:
data = {}
if hasattr(self, "_unilabos_state") and self._unilabos_state:
if hasattr(self, '_unilabos_state') and self._unilabos_state:
safe_state = {}
for k, v in self._unilabos_state.items():
# 如果是 Material 字典,深入检查
@@ -298,33 +268,26 @@ class PRCXI9300TipRack(TipRack):
else:
# 打印日志提醒(可选)
# print(f"Warning: Removing non-serializable key {mk} from {self.name}")
pass
pass
safe_state[k] = safe_material
# 其他顶层属性也进行类型检查
elif isinstance(v, (str, int, float, bool, list, dict, type(None))):
safe_state[k] = v
data.update(safe_state)
return data
class PRCXI9300Trash(Trash):
"""PRCXI 9300 的专用 Trash 类,继承自 Trash。
该类定义了 PRCXI 9300 的工作台布局和槽位信息。
"""
def __init__(
self,
name: str,
size_x: float,
size_y: float,
size_z: float,
category: str = "trash",
material_info: Optional[Dict[str, Any]] = None,
**kwargs,
):
def __init__(self, name: str, size_x: float, size_y: float, size_z: float,
category: str = "trash",
material_info: Optional[Dict[str, Any]] = None,
**kwargs):
if name != "trash":
print(f"Warning: PRCXI9300Trash usually expects name='trash' for backend logic, but got '{name}'.")
super().__init__(name, size_x, size_y, size_z, **kwargs)
@@ -343,7 +306,7 @@ class PRCXI9300Trash(Trash):
data = super().serialize_state()
except AttributeError:
data = {}
if hasattr(self, "_unilabos_state") and self._unilabos_state:
if hasattr(self, '_unilabos_state') and self._unilabos_state:
safe_state = {}
for k, v in self._unilabos_state.items():
# 如果是 Material 字典,深入检查
@@ -356,37 +319,29 @@ class PRCXI9300Trash(Trash):
else:
# 打印日志提醒(可选)
# print(f"Warning: Removing non-serializable key {mk} from {self.name}")
pass
pass
safe_state[k] = safe_material
# 其他顶层属性也进行类型检查
elif isinstance(v, (str, int, float, bool, list, dict, type(None))):
safe_state[k] = v
data.update(safe_state)
return data
class PRCXI9300TubeRack(TubeRack):
"""
专用管架类:用于 EP 管架、试管架等。
继承自 PLR 的 TubeRack并支持注入 material_info (UUID)。
"""
def __init__(
self,
name: str,
size_x: float,
size_y: float,
size_z: float,
category: str = "tube_rack",
items: Optional[Dict[str, Any]] = None,
ordered_items: Optional[OrderedDict] = None,
ordering: Optional[OrderedDict] = None,
model: Optional[str] = None,
material_info: Optional[Dict[str, Any]] = None,
**kwargs,
):
def __init__(self, name: str, size_x: float, size_y: float, size_z: float,
category: str = "tube_rack",
items: Optional[Dict[str, Any]] = None,
ordered_items: Optional[OrderedDict] = None,
ordering: Optional[OrderedDict] = None,
model: Optional[str] = None,
material_info: Optional[Dict[str, Any]] = None,
**kwargs):
# 如果 ordered_items 不为 None直接使用
if ordered_items is not None:
items_to_pass = ordered_items
@@ -412,16 +367,24 @@ class PRCXI9300TubeRack(TubeRack):
else:
items_to_pass = None
ordering_param = None
# 根据情况传递不同的参数
if items_to_pass is not None:
super().__init__(name, size_x, size_y, size_z, ordered_items=items_to_pass, model=model, **kwargs)
super().__init__(name, size_x, size_y, size_z,
ordered_items=items_to_pass,
model=model,
**kwargs)
elif ordering_param is not None:
# 传递 ordering 参数,让 TubeRack 自己创建 Tube 对象
super().__init__(name, size_x, size_y, size_z, ordering=ordering_param, model=model, **kwargs)
super().__init__(name, size_x, size_y, size_z,
ordering=ordering_param,
model=model,
**kwargs)
else:
super().__init__(name, size_x, size_y, size_z, model=model, **kwargs)
super().__init__(name, size_x, size_y, size_z,
model=model,
**kwargs)
self._unilabos_state = {}
if material_info:
self._unilabos_state["Material"] = material_info
@@ -431,7 +394,7 @@ class PRCXI9300TubeRack(TubeRack):
data = super().serialize_state()
except AttributeError:
data = {}
if hasattr(self, "_unilabos_state") and self._unilabos_state:
if hasattr(self, '_unilabos_state') and self._unilabos_state:
safe_state = {}
for k, v in self._unilabos_state.items():
# 如果是 Material 字典,深入检查
@@ -444,41 +407,33 @@ class PRCXI9300TubeRack(TubeRack):
else:
# 打印日志提醒(可选)
# print(f"Warning: Removing non-serializable key {mk} from {self.name}")
pass
pass
safe_state[k] = safe_material
# 其他顶层属性也进行类型检查
elif isinstance(v, (str, int, float, bool, list, dict, type(None))):
safe_state[k] = v
data.update(safe_state)
return data
class PRCXI9300PlateAdapter(PlateAdapter):
"""
专用板式适配器类:用于承载 Plate 的底座(如 PCR 适配器、磁吸架等)。
支持注入 material_info (UUID)。
"""
def __init__(
self,
name: str,
size_x: float,
size_y: float,
size_z: float,
category: str = "plate_adapter",
model: Optional[str] = None,
material_info: Optional[Dict[str, Any]] = None,
# 参数给予默认值 (标准96孔板尺寸)
adapter_hole_size_x: float = 127.76,
adapter_hole_size_y: float = 85.48,
adapter_hole_size_z: float = 10.0, # 假设凹槽深度或板子放置高度
dx: Optional[float] = None,
dy: Optional[float] = None,
dz: float = 0.0, # 默认Z轴偏移
**kwargs,
):
def __init__(self, name: str, size_x: float, size_y: float, size_z: float,
category: str = "plate_adapter",
model: Optional[str] = None,
material_info: Optional[Dict[str, Any]] = None,
# 参数给予默认值 (标准96孔板尺寸)
adapter_hole_size_x: float = 127.76,
adapter_hole_size_y: float = 85.48,
adapter_hole_size_z: float = 10.0, # 假设凹槽深度或板子放置高度
dx: Optional[float] = None,
dy: Optional[float] = None,
dz: float = 0.0, # 默认Z轴偏移
**kwargs):
# 自动居中计算:如果未指定 dx/dy则根据适配器尺寸和孔尺寸计算居中位置
if dx is None:
dx = (size_x - adapter_hole_size_x) / 2
@@ -486,20 +441,20 @@ class PRCXI9300PlateAdapter(PlateAdapter):
dy = (size_y - adapter_hole_size_y) / 2
super().__init__(
name=name,
size_x=size_x,
size_y=size_y,
size_z=size_z,
name=name,
size_x=size_x,
size_y=size_y,
size_z=size_z,
dx=dx,
dy=dy,
dz=dz,
adapter_hole_size_x=adapter_hole_size_x,
adapter_hole_size_y=adapter_hole_size_y,
adapter_hole_size_z=adapter_hole_size_z,
model=model,
**kwargs,
model=model,
**kwargs
)
self._unilabos_state = {}
if material_info:
self._unilabos_state["Material"] = material_info
@@ -509,7 +464,7 @@ class PRCXI9300PlateAdapter(PlateAdapter):
data = super().serialize_state()
except AttributeError:
data = {}
if hasattr(self, "_unilabos_state") and self._unilabos_state:
if hasattr(self, '_unilabos_state') and self._unilabos_state:
safe_state = {}
for k, v in self._unilabos_state.items():
# 如果是 Material 字典,深入检查
@@ -522,16 +477,15 @@ class PRCXI9300PlateAdapter(PlateAdapter):
else:
# 打印日志提醒(可选)
# print(f"Warning: Removing non-serializable key {mk} from {self.name}")
pass
pass
safe_state[k] = safe_material
# 其他顶层属性也进行类型检查
elif isinstance(v, (str, int, float, bool, list, dict, type(None))):
safe_state[k] = v
data.update(safe_state)
return data
class PRCXI9300Handler(LiquidHandlerAbstract):
support_touch_tip = False
@@ -564,9 +518,7 @@ class PRCXI9300Handler(LiquidHandlerAbstract):
if "Material" in child.children[0]._unilabos_state:
number = int(child.name.replace("T", ""))
tablets_info.append(
WorkTablets(
Number=number, Code=f"T{number}", Material=child.children[0]._unilabos_state["Material"]
)
WorkTablets(Number=number, Code=f"T{number}", Material=child.children[0]._unilabos_state["Material"])
)
if is_9320:
print("当前设备是9320")
@@ -586,14 +538,9 @@ class PRCXI9300Handler(LiquidHandlerAbstract):
super().post_init(ros_node)
self._unilabos_backend.post_init(ros_node)
def set_liquid(self, wells: list[Well], liquid_names: list[str], volumes: list[float]) -> SetLiquidReturn:
def set_liquid(self, wells: list[Well], liquid_names: list[str], volumes: list[float]) -> SimpleReturn:
return super().set_liquid(wells, liquid_names, volumes)
def set_liquid_from_plate(
self, plate: ResourceSlot, well_names: list[str], liquid_names: list[str], volumes: list[float]
) -> SetLiquidFromPlateReturn:
return super().set_liquid_from_plate(plate, well_names, liquid_names, volumes)
def set_group(self, group_name: str, wells: List[Well], volumes: List[float]):
return super().set_group(group_name, wells, volumes)
@@ -852,8 +799,7 @@ class PRCXI9300Handler(LiquidHandlerAbstract):
return await self._unilabos_backend.shaker_action(time, module_no, amplitude, is_wait)
async def heater_action(self, temperature: float, time: int):
return await self._unilabos_backend.heater_action(temperature, time)
return await self._unilabos_backend.heater_action(temperature, time)
async def move_plate(
self,
plate: Plate,
@@ -876,11 +822,10 @@ class PRCXI9300Handler(LiquidHandlerAbstract):
drop_direction,
pickup_direction,
pickup_distance_from_top,
target_plate_number=to,
target_plate_number = to,
**backend_kwargs,
)
class PRCXI9300Backend(LiquidHandlerBackend):
"""PRCXI 9300 的后端实现,继承自 LiquidHandlerBackend。
@@ -933,28 +878,31 @@ class PRCXI9300Backend(LiquidHandlerBackend):
self.steps_todo_list.append(step)
return step
async def pick_up_resource(self, pickup: ResourcePickup, **backend_kwargs):
resource = pickup.resource
offset = pickup.offset
pickup_distance_from_top = pickup.pickup_distance_from_top
direction = pickup.direction
async def pick_up_resource(self, pickup: ResourcePickup, **backend_kwargs):
resource=pickup.resource
offset=pickup.offset
pickup_distance_from_top=pickup.pickup_distance_from_top
direction=pickup.direction
plate_number = int(resource.parent.name.replace("T", ""))
is_whole_plate = True
balance_height = 0
step = self.api_client.clamp_jaw_pick_up(plate_number, is_whole_plate, balance_height)
self.steps_todo_list.append(step)
return step
async def drop_resource(self, drop: ResourceDrop, **backend_kwargs):
plate_number = None
target_plate_number = backend_kwargs.get("target_plate_number", None)
if target_plate_number is not None:
plate_number = int(target_plate_number.name.replace("T", ""))
is_whole_plate = True
balance_height = 0
if plate_number is None:
@@ -963,6 +911,7 @@ class PRCXI9300Backend(LiquidHandlerBackend):
self.steps_todo_list.append(step)
return step
async def heater_action(self, temperature: float, time: int):
print(f"\n\nHeater action: temperature={temperature}, time={time}\n\n")
# return await self.api_client.heater_action(temperature, time)
@@ -1019,7 +968,7 @@ class PRCXI9300Backend(LiquidHandlerBackend):
error_code = self.api_client.get_error_code()
if error_code:
print(f"PRCXI9300 error code detected: {error_code}")
# 清除错误代码
self.api_client.clear_error_code()
print("PRCXI9300 error code cleared.")
@@ -1027,11 +976,11 @@ class PRCXI9300Backend(LiquidHandlerBackend):
# 执行重置
print("Starting PRCXI9300 reset...")
self.api_client.call("IAutomation", "Reset")
# 检查重置状态并等待完成
while not self.is_reset_ok:
print("Waiting for PRCXI9300 to reset...")
if hasattr(self, "_ros_node") and self._ros_node is not None:
if hasattr(self, '_ros_node') and self._ros_node is not None:
await self._ros_node.sleep(1)
else:
await asyncio.sleep(1)
@@ -1049,7 +998,7 @@ class PRCXI9300Backend(LiquidHandlerBackend):
"""Pick up tips from the specified resource."""
# INSERT_YOUR_CODE
# Ensure use_channels is converted to a list of ints if it's an array
if hasattr(use_channels, "tolist"):
if hasattr(use_channels, 'tolist'):
_use_channels = use_channels.tolist()
else:
_use_channels = list(use_channels) if use_channels is not None else None
@@ -1103,7 +1052,7 @@ class PRCXI9300Backend(LiquidHandlerBackend):
async def drop_tips(self, ops: List[Drop], use_channels: List[int] = None):
"""Pick up tips from the specified resource."""
if hasattr(use_channels, "tolist"):
if hasattr(use_channels, 'tolist'):
_use_channels = use_channels.tolist()
else:
_use_channels = list(use_channels) if use_channels is not None else None
@@ -1186,7 +1135,7 @@ class PRCXI9300Backend(LiquidHandlerBackend):
none_keys: List[str] = [],
):
"""Mix liquid in the specified resources."""
plate_indexes = []
for op in targets:
deck = op.parent.parent.parent
@@ -1229,7 +1178,7 @@ class PRCXI9300Backend(LiquidHandlerBackend):
async def aspirate(self, ops: List[SingleChannelAspiration], use_channels: List[int] = None):
"""Aspirate liquid from the specified resources."""
if hasattr(use_channels, "tolist"):
if hasattr(use_channels, 'tolist'):
_use_channels = use_channels.tolist()
else:
_use_channels = list(use_channels) if use_channels is not None else None
@@ -1286,7 +1235,7 @@ class PRCXI9300Backend(LiquidHandlerBackend):
async def dispense(self, ops: List[SingleChannelDispense], use_channels: List[int] = None):
"""Dispense liquid into the specified resources."""
if hasattr(use_channels, "tolist"):
if hasattr(use_channels, 'tolist'):
_use_channels = use_channels.tolist()
else:
_use_channels = list(use_channels) if use_channels is not None else None
@@ -1467,6 +1416,7 @@ class PRCXI9300Api:
time.sleep(1)
return success
def call(self, service: str, method: str, params: Optional[list] = None) -> Any:
payload = json.dumps(
{"ServiceName": service, "MethodName": method, "Paramters": params or []}, separators=(",", ":")
@@ -1593,7 +1543,7 @@ class PRCXI9300Api:
assist_fun5: str = "",
liquid_method: str = "NormalDispense",
axis: str = "Left",
) -> Dict[str, Any]:
) -> Dict[str, Any]:
return {
"StepAxis": axis,
"Function": "Imbibing",
@@ -1671,7 +1621,7 @@ class PRCXI9300Api:
assist_fun5: str = "",
liquid_method: str = "NormalDispense",
axis: str = "Left",
) -> Dict[str, Any]:
) -> Dict[str, Any]:
return {
"StepAxis": axis,
"Function": "Blending",
@@ -1731,11 +1681,11 @@ class PRCXI9300Api:
"LiquidDispensingMethod": liquid_method,
}
def clamp_jaw_pick_up(
self,
def clamp_jaw_pick_up(self,
plate_no: int,
is_whole_plate: bool,
balance_height: int,
) -> Dict[str, Any]:
return {
"StepAxis": "ClampingJaw",
@@ -1745,7 +1695,7 @@ class PRCXI9300Api:
"HoleRow": 1,
"HoleCol": 1,
"BalanceHeight": balance_height,
"PlateOrHoleNum": f"T{plate_no}",
"PlateOrHoleNum": f"T{plate_no}"
}
def clamp_jaw_drop(
@@ -1753,6 +1703,7 @@ class PRCXI9300Api:
plate_no: int,
is_whole_plate: bool,
balance_height: int,
) -> Dict[str, Any]:
return {
"StepAxis": "ClampingJaw",
@@ -1762,7 +1713,7 @@ class PRCXI9300Api:
"HoleRow": 1,
"HoleCol": 1,
"BalanceHeight": balance_height,
"PlateOrHoleNum": f"T{plate_no}",
"PlateOrHoleNum": f"T{plate_no}"
}
def shaker_action(self, time: int, module_no: int, amplitude: int, is_wait: bool):
@@ -1775,7 +1726,6 @@ class PRCXI9300Api:
"AssistFun4": is_wait,
}
class DefaultLayout:
def __init__(self, product_name: str = "PRCXI9300"):
@@ -2154,9 +2104,7 @@ if __name__ == "__main__":
size_y=50,
size_z=10,
category="tip_rack",
ordered_items=collections.OrderedDict(
{k: f"{child_prefix}_{k}" for k, v in tip_racks["ordering"].items()}
),
ordered_items=collections.OrderedDict({k: f"{child_prefix}_{k}" for k, v in tip_racks["ordering"].items()}),
)
tip_rack_serialized = tip_rack.serialize()
tip_rack_serialized["parent_name"] = deck.name
@@ -2351,37 +2299,43 @@ if __name__ == "__main__":
A = tree_to_list([resource_plr_to_ulab(deck)])
with open("deck.json", "w", encoding="utf-8") as f:
A.insert(
0,
{
"id": "PRCXI",
"name": "PRCXI",
"parent": None,
"type": "device",
"class": "liquid_handler.prcxi",
"position": {"x": 0, "y": 0, "z": 0},
"config": {
"deck": {
"_resource_child_name": "PRCXI_Deck",
"_resource_type": "unilabos.devices.liquid_handling.prcxi.prcxi:PRCXI9300Deck",
},
"host": "192.168.0.121",
"port": 9999,
"timeout": 10.0,
"axis": "Right",
"channel_num": 1,
"setup": False,
"debug": True,
"simulator": True,
"matrix_id": "5de524d0-3f95-406c-86dd-f83626ebc7cb",
"is_9320": True,
},
"data": {},
"children": ["PRCXI_Deck"],
A.insert(0, {
"id": "PRCXI",
"name": "PRCXI",
"parent": None,
"type": "device",
"class": "liquid_handler.prcxi",
"position": {
"x": 0,
"y": 0,
"z": 0
},
)
"config": {
"deck": {
"_resource_child_name": "PRCXI_Deck",
"_resource_type": "unilabos.devices.liquid_handling.prcxi.prcxi:PRCXI9300Deck"
},
"host": "192.168.0.121",
"port": 9999,
"timeout": 10.0,
"axis": "Right",
"channel_num": 1,
"setup": False,
"debug": True,
"simulator": True,
"matrix_id": "5de524d0-3f95-406c-86dd-f83626ebc7cb",
"is_9320": True
},
"data": {},
"children": [
"PRCXI_Deck"
]
})
A[1]["parent"] = "PRCXI"
json.dump({"nodes": A, "links": []}, f, indent=4, ensure_ascii=False)
json.dump({
"nodes": A,
"links": []
}, f, indent=4, ensure_ascii=False)
handler = PRCXI9300Handler(
deck=deck,
@@ -2423,6 +2377,7 @@ if __name__ == "__main__":
time.sleep(5)
os._exit(0)
prcxi_api = PRCXI9300Api(host="192.168.0.121", port=9999)
prcxi_api.list_matrices()
prcxi_api.get_all_materials()

View File

@@ -1,376 +0,0 @@
# -*- coding: utf-8 -*-
"""
ZDT X42 Closed-Loop Stepper Motor Driver
RS485 Serial Communication via USB-Serial Converter
- Baudrate: 115200
"""
import serial
import time
import threading
import struct
import logging
from typing import Optional, Any
try:
from unilabos.device_comms.universal_driver import UniversalDriver
except ImportError:
class UniversalDriver:
def __init__(self, *args, **kwargs):
self.logger = logging.getLogger(self.__class__.__name__)
def execute_command_from_outer(self, command: Any): pass
from serial.rs485 import RS485Settings
class ZDTX42Driver(UniversalDriver):
"""
ZDT X42 closed-loop stepper motor driver
Supported features:
- velocity-mode motion
- position-mode motion (relative/absolute)
- position readout and zeroing
- enable/disable control
Communication protocol:
- frame format: [device ID] [function code] [data ...] [checksum byte = 0x6B]
- response length depends on the function code
"""
def __init__(
self,
port: str,
baudrate: int = 115200,
device_id: int = 1,
timeout: float = 0.5,
debug: bool = False
):
"""
Initialize the ZDT X42 motor driver
Args:
port: serial device path
baudrate: baud rate (default 115200)
device_id: device address (1-255)
timeout: communication timeout in seconds
debug: enable debug output
"""
super().__init__()
self.id = device_id
self.debug = debug
self.lock = threading.RLock()
self.status = "idle" # 对应注册表中的 status (str)
self.position = 0 # 对应注册表中的 position (int)
try:
self.ser = serial.Serial(
port=port,
baudrate=baudrate,
timeout=timeout,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE
)
# 启用 RS485 模式
try:
self.ser.rs485_mode = RS485Settings(
rts_level_for_tx=True,
rts_level_for_rx=False
)
except Exception:
pass # RS485 模式是可选的
self.logger.info(
f"ZDT X42 Motor connected: {port} "
f"(Baud: {baudrate}, ID: {device_id})"
)
# 自动使能电机,确保初始状态可运动
self.enable(True)
# 启动背景轮询线程,确保 position 实时刷新
self._stop_event = threading.Event()
self._polling_thread = threading.Thread(
target=self._update_loop,
name=f"ZDTPolling_{port}",
daemon=True
)
self._polling_thread.start()
except Exception as e:
self.logger.error(f"Failed to open serial port {port}: {e}")
self.ser = None
def _update_loop(self):
"""背景循环读取电机位置"""
while not self._stop_event.is_set():
try:
self.get_position()
except Exception as e:
if self.debug:
self.logger.error(f"Polling error: {e}")
time.sleep(1.0) # 每1秒刷新一次位置数据
def _send(self, func_code: int, payload: list) -> bytes:
"""
Send a command and receive the response
Args:
func_code: function code
payload: data payload (list of bytes)
Returns:
response data (bytes)
"""
if not self.ser:
self.logger.error("Serial port not available")
return b""
with self.lock:
# flush the input buffer
self.ser.reset_input_buffer()
# build the message: [ID] [function code] [data ...] [checksum = 0x6B]
message = bytes([self.id, func_code] + payload + [0x6B])
# transmit
self.ser.write(message)
# response length depends on the function code:
# query commands return 10 bytes, control commands return 4 bytes
read_len = 10 if func_code in [0x31, 0x32, 0x35, 0x24, 0x27] else 4
response = self.ser.read(read_len)
# 调试输出
if self.debug:
sent_hex = message.hex().upper()
recv_hex = response.hex().upper() if response else 'TIMEOUT'
print(f"[ID {self.id}] TX: {sent_hex} → RX: {recv_hex}")
return response
def enable(self, on: bool = True) -> bool:
"""
Enable/disable the motor
Args:
on: True = enable (hold torque), False = disable (release shaft)
Returns:
True on success
"""
state = 1 if on else 0
resp = self._send(0xF3, [0xAB, state, 0])
return len(resp) >= 4
def move_speed(
self,
speed_rpm: int,
direction: str = "CW",
acceleration: int = 10
) -> bool:
"""
Velocity-mode motion
Args:
speed_rpm: speed (RPM)
direction: direction ("CW" = clockwise, "CCW" = counter-clockwise)
acceleration: acceleration (0-255)
Returns:
True on success
"""
dir_val = 0 if direction.upper() in ["CW", "顺时针"] else 1
speed_bytes = struct.pack('>H', int(speed_rpm))
self.status = f"moving@{speed_rpm}rpm"
resp = self._send(0xF6, [dir_val, speed_bytes[0], speed_bytes[1], acceleration, 0])
return len(resp) >= 4
def move_position(
self,
pulses: int,
speed_rpm: int,
direction: str = "CW",
acceleration: int = 10,
absolute: bool = False
) -> bool:
"""
Position-mode motion
Args:
pulses: number of pulses
speed_rpm: speed (RPM)
direction: direction ("CW" = clockwise, "CCW" = counter-clockwise)
acceleration: acceleration (0-255)
absolute: True = absolute position, False = relative position
Returns:
True on success
"""
dir_val = 0 if direction.upper() in ["CW", "顺时针"] else 1
speed_bytes = struct.pack('>H', int(speed_rpm))
self.status = f"moving_to_{pulses}"
pulse_bytes = struct.pack('>I', int(pulses))
abs_flag = 1 if absolute else 0
payload = [
dir_val,
speed_bytes[0], speed_bytes[1],
acceleration,
pulse_bytes[0], pulse_bytes[1], pulse_bytes[2], pulse_bytes[3],
abs_flag,
0
]
resp = self._send(0xFD, payload)
return len(resp) >= 4
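# Worked example (hypothetical call): move_position(pulses=1000, speed_rpm=60,
# direction="CW", acceleration=10, absolute=False) packs
#   speed  -> struct.pack('>H', 60)   = 00 3C
#   pulses -> struct.pack('>I', 1000) = 00 00 03 E8
# so for device ID 1 the transmitted frame is 01 FD 00 00 3C 0A 00 00 03 E8 00 00 6B.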
def stop(self) -> bool:
"""
停止电机
Returns:
是否成功
"""
self.status = "idle"
resp = self._send(0xFE, [0x98, 0])
return len(resp) >= 4
def rotate_quarter(self, speed_rpm: int = 60, direction: str = "CW") -> bool:
"""
Rotate the motor a quarter turn (blocking).
Assumes 3200 pulses per revolution, so 1/4 turn = 800 pulses.
"""
pulses = 800
success = self.move_position(pulses=pulses, speed_rpm=speed_rpm, direction=direction, absolute=False)
if success:
# 计算预估旋转时间并进行阻塞等待 (Time = revolutions / (RPM/60))
# 1/4 rev / (RPM/60) = 15.0 / RPM
estimated_time = 15.0 / max(1, speed_rpm)
time.sleep(estimated_time + 0.5) # 额外给 0.5 秒缓冲
self.status = "idle"
return success
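# Worked example of the timing estimate above: at 60 RPM a quarter turn takes
# 15.0 / 60 = 0.25 s, so the method sleeps ~0.75 s including the 0.5 s buffer;
# at 30 RPM it sleeps 0.5 + 0.5 = 1.0 s.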
def wait_time(self, duration_s: float) -> bool:
"""
等待指定时间 (秒)
"""
self.logger.info(f"Waiting for {duration_s} seconds...")
time.sleep(duration_s)
return True
def set_zero(self) -> bool:
"""
清零当前位置
Returns:
是否成功
"""
resp = self._send(0x0A, [])
return len(resp) >= 4
def get_position(self) -> Optional[int]:
"""
Read the current position (in pulses)
Returns:
current position in pulses, or None on failure
"""
resp = self._send(0x32, [])
if len(resp) >= 8:
# response format: [ID] [Func] [sign byte] [4-byte value] [checksum]
sign = resp[2]  # 0 = positive, 1 = negative
value = struct.unpack('>I', resp[3:7])[0]
self.position = -value if sign == 1 else value
if self.debug:
print(f"[Position] Raw: {resp.hex().upper()}, Parsed: {self.position}")
return self.position
self.logger.warning("Failed to read position")
return None
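# Example decode (assumed reply bytes): if resp[2] == 0x00 and resp[3:7] == 00 00 03 20,
# the value is 0x00000320 = 800 pulses and position = +800; a sign byte of 0x01 would
# yield -800.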
def close(self):
"""关闭串口连接并停止线程"""
if hasattr(self, '_stop_event'):
self._stop_event.set()
if self.ser and self.ser.is_open:
self.ser.close()
self.logger.info("Serial port closed")
# ============================================================
# 测试和调试代码
# ============================================================
def test_motor():
"""基础功能测试"""
logging.basicConfig(level=logging.INFO)
print("="*60)
print("ZDT X42 电机驱动测试")
print("="*60)
driver = ZDTX42Driver(
port="/dev/tty.usbserial-3110",
baudrate=115200,
device_id=2,
debug=True
)
if not driver.ser:
print("❌ 串口打开失败")
return
try:
# 测试 1: 读取位置
print("\n[1] 读取当前位置")
pos = driver.get_position()
print(f"✓ 当前位置: {pos} 脉冲")
# 测试 2: 使能
print("\n[2] 使能电机")
driver.enable(True)
time.sleep(0.3)
print("✓ 电机已锁定")
# 测试 3: 相对位置运动
print("\n[3] 相对位置运动 (1000脉冲)")
driver.move_position(pulses=1000, speed_rpm=60, direction="CW")
time.sleep(2)
pos = driver.get_position()
print(f"✓ 新位置: {pos}")
# 测试 4: 速度运动
print("\n[4] 速度模式 (30RPM, 3秒)")
driver.move_speed(speed_rpm=30, direction="CW")
time.sleep(3)
driver.stop()
pos = driver.get_position()
print(f"✓ 停止后位置: {pos}")
# 测试 5: 禁用
print("\n[5] 禁用电机")
driver.enable(False)
print("✓ 电机已松开")
print("\n" + "="*60)
print("✅ 测试完成")
print("="*60)
except Exception as e:
print(f"\n❌ 测试失败: {e}")
import traceback
traceback.print_exc()
finally:
driver.close()
if __name__ == "__main__":
test_motor()

View File

@@ -623,119 +623,6 @@ class ChinweDevice(UniversalDriver):
time.sleep(duration)
return True
def separation_step(self, motor_id: int = 5, speed: int = 60, pulses: int = 700,
max_cycles: int = 0, timeout: int = 300) -> bool:
"""
Liquid-separation step - liquid-level sensor coupled with the motor
When the sensor detects liquid, the motor rotates clockwise by the given number of pulses
When the sensor detects no liquid, the motor rotates counter-clockwise by the given number of pulses
:param motor_id: motor ID (must be one of the motor_ids configured at init)
:param speed: motor speed (RPM)
:param pulses: pulses per rotation step (default 700, roughly 1/4 turn assuming 3200 pulses/rev)
:param max_cycles: maximum number of cycles (0 = unlimited, default 0)
:param timeout: overall timeout in seconds
:return: True on success, False on timeout or failure
"""
motor_id = int(motor_id)
speed = int(speed)
pulses = int(pulses)
max_cycles = int(max_cycles)
timeout = int(timeout)
# 检查电机是否存在
if motor_id not in self.motors:
self.logger.error(f"Motor {motor_id} not found in configured motors: {list(self.motors.keys())}")
return False
# 检查传感器是否可用
if not self.sensor:
self.logger.error("Sensor not initialized")
return False
motor = self.motors[motor_id]
# 停止轮询线程,避免与 separation_step 同时读取传感器造成串口冲突
self.logger.info("Stopping polling thread for separation_step...")
self._stop_event.set()
if self._poll_thread and self._poll_thread.is_alive():
self._poll_thread.join(timeout=2.0)
# 使能电机
self.logger.info(f"Enabling motor {motor_id}...")
motor.enable(True)
time.sleep(0.2)
self.logger.info(f"Starting separation step: motor_id={motor_id}, speed={speed} RPM, "
f"pulses={pulses}, max_cycles={max_cycles}, timeout={timeout}s")
# 记录上一次的液位状态
last_level = None
cycle_count = 0
start_time = time.time()
error_count = 0
try:
while True:
# 检查超时
if time.time() - start_time > timeout:
self.logger.warning(f"Separation step timeout after {timeout} seconds")
return False
# 检查循环次数限制
if max_cycles > 0 and cycle_count >= max_cycles:
self.logger.info(f"Separation step completed: reached max_cycles={max_cycles}")
return True
# 读取传感器数据
data = self.sensor.read_level()
if data is None:
error_count += 1
if error_count > 5:
self.logger.warning("Sensor read failed multiple times, retrying...")
error_count = 0
time.sleep(0.5)
continue
error_count = 0
current_level = data['level']
rssi = data['rssi']
# 检测状态变化 (包括首次检测)
if current_level != last_level:
cycle_count += 1
if current_level:
# 有液 -> 电机顺时针旋转
self.logger.info(f"[Cycle {cycle_count}] Liquid detected (RSSI={rssi}), "
f"rotating motor {motor_id} clockwise {pulses} pulses")
motor.run_position(pulses=pulses, speed_rpm=speed, direction=0, absolute=False)
# 等待电机完成 (预估时间)
estimated_time = 15.0 / max(1, speed)
time.sleep(estimated_time + 0.5)
else:
# 无液 -> 电机逆时针旋转
self.logger.info(f"[Cycle {cycle_count}] No liquid detected (RSSI={rssi}), "
f"rotating motor {motor_id} counter-clockwise {pulses} pulses")
motor.run_position(pulses=pulses, speed_rpm=speed, direction=1, absolute=False)
# 等待电机完成 (预估时间)
estimated_time = 15.0 / max(1, speed)
time.sleep(estimated_time + 0.5)
# 更新状态
last_level = current_level
# 轮询间隔
time.sleep(0.1)
finally:
# 恢复轮询线程
self.logger.info("Restarting polling thread...")
self._start_polling()
def execute_command_from_outer(self, command_dict: Dict[str, Any]) -> bool:
"""支持标准 JSON 指令调用"""
return super().execute_command_from_outer(command_dict)
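# Illustrative direct call (hypothetical values): rotate motor 5 clockwise by 700 pulses
# each time liquid is detected and counter-clockwise when it is not, stopping after 4
# level changes (the first reading counts as one) or after 300 s, whichever comes first:
#   device.separation_step(motor_id=5, speed=60, pulses=700, max_cycles=4, timeout=300)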

View File

@@ -1,379 +0,0 @@
# -*- coding: utf-8 -*-
"""
XKC RS485 liquid-level sensor (Modbus RTU)
Notes:
1. Follows the Modbus-RTU protocol.
2. Data registers: 0x0001 (level state, 1 = liquid, 0 = no liquid), 0x0002 (RSSI signal strength).
3. Address register: 0x0004 (read/write, range 1-254).
4. Baudrate register: 0x0005 (writable; see change_baudrate for the code table).
"""
import struct
import threading
import time
import logging
import serial
from typing import Optional, Dict, Any, List
from unilabos.device_comms.universal_driver import UniversalDriver
class TransportManager:
"""
Unified transport manager.
Only serial (wired) connections are supported.
"""
def __init__(self, port: str, baudrate: int = 9600, timeout: float = 3.0, logger=None):
self.port = port
self.baudrate = baudrate
self.timeout = timeout
self.logger = logger
self.lock = threading.RLock() # 线程锁,确保多设备共用一个连接时不冲突
self.serial = None
self._connect_serial()
def _connect_serial(self):
try:
self.serial = serial.Serial(
port=self.port,
baudrate=self.baudrate,
timeout=self.timeout
)
except Exception as e:
raise ConnectionError(f"Serial open failed: {e}")
def close(self):
"""关闭连接"""
if self.serial and self.serial.is_open:
self.serial.close()
def clear_buffer(self):
"""清空缓冲区 (Thread-safe)"""
with self.lock:
if self.serial:
self.serial.reset_input_buffer()
def write(self, data: bytes):
"""发送原始字节"""
with self.lock:
if self.serial:
self.serial.write(data)
def read(self, size: int) -> bytes:
"""读取指定长度字节"""
if self.serial:
return self.serial.read(size)
return b''
class XKCSensorDriver(UniversalDriver):
"""XKC RS485 液位传感器 (Modbus RTU)"""
def __init__(self, port: str, baudrate: int = 9600, device_id: int = 6,
threshold: int = 300, timeout: float = 3.0, debug: bool = False):
super().__init__()
self.port = port
self.baudrate = baudrate
self.device_id = device_id
self.threshold = threshold
self.timeout = timeout
self.debug = debug
self.level = False
self.rssi = 0
self.status = {"level": self.level, "rssi": self.rssi}
try:
self.transport = TransportManager(port, baudrate, timeout, logger=self.logger)
self.logger.info(f"XKCSensorDriver connected to {port} (ID: {device_id})")
except Exception as e:
self.logger.error(f"Failed to connect XKCSensorDriver: {e}")
self.transport = None
# 启动背景轮询线程,确保 status 实时刷新
self._stop_event = threading.Event()
self._polling_thread = threading.Thread(
target=self._update_loop,
name=f"XKCPolling_{port}",
daemon=True
)
if self.transport:
self._polling_thread.start()
def _update_loop(self):
"""背景循环读取传感器数据"""
while not self._stop_event.is_set():
try:
self.read_level()
except Exception as e:
if self.debug:
self.logger.error(f"Polling error: {e}")
time.sleep(2.0) # 每2秒刷新一次数据
def _crc(self, data: bytes) -> bytes:
crc = 0xFFFF
for byte in data:
crc ^= byte
for _ in range(8):
if crc & 0x0001: crc = (crc >> 1) ^ 0xA001
else: crc >>= 1
return struct.pack('<H', crc)
def read_level(self) -> Optional[Dict[str, Any]]:
"""
Read the liquid level.
Returns: {'level': bool, 'rssi': int}
"""
if not self.transport:
return None
with self.transport.lock:
self.transport.clear_buffer()
# Modbus Read Registers: 01 03 00 01 00 02 CRC
payload = struct.pack('>HH', 0x0001, 0x0002)
msg = struct.pack('BB', self.device_id, 0x03) + payload
msg += self._crc(msg)
if self.debug:
self.logger.info(f"TX (ID {self.device_id}): {msg.hex().upper()}")
self.transport.write(msg)
# Read header
h = self.transport.read(3) # Addr, Func, Len
if self.debug:
self.logger.info(f"RX Header: {h.hex().upper()}")
if len(h) < 3: return None
length = h[2]
# Read body + CRC
body = self.transport.read(length + 2)
if self.debug:
self.logger.info(f"RX Body+CRC: {body.hex().upper()}")
if len(body) < length + 2:
# Firmware bug fix specific to some modules
if len(body) == 4 and length == 4:
pass
else:
return None
data = body[:-2]
# Per the manual:
# register 0x0001 (data[0:2]): level state (00 01 = liquid, 00 00 = no liquid)
# register 0x0002 (data[2:4]): RSSI signal strength
hw_level = False
rssi = 0
if len(data) >= 4:
hw_level = ((data[0] << 8) | data[1]) == 1
rssi = (data[2] << 8) | data[3]
elif len(data) == 2:
# 兼容模式: 某些老固件可能只返回 1 个寄存器
rssi = (data[0] << 8) | data[1]
hw_level = rssi > self.threshold
else:
return None
# final decision: prefer the hardware-level flag, with the RSSI threshold as a supplementary check
# note: if the user has explicitly set THRESHOLD, it is weighed into this logic
self.level = hw_level or (rssi > self.threshold)
self.rssi = rssi
result = {
'level': self.level,
'rssi': self.rssi
}
self.status = result
return result
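# Example decode (assumed payload): data bytes 00 01 01 90 give a hardware level flag of 1
# (liquid present) and rssi = 0x0190 = 400, so `level` is True (400 also exceeds the
# default threshold of 300).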
def wait_level(self, target_state: bool, timeout: float = 60.0) -> bool:
"""
等待液位达到目标状态 (阻塞式)
"""
self.logger.info(f"Waiting for level: {target_state}")
start_time = time.time()
while (time.time() - start_time) < timeout:
res = self.read_level()
if res and res.get('level') == target_state:
return True
time.sleep(0.5)
self.logger.warning(f"Wait level timeout ({timeout}s)")
return False
def wait_for_liquid(self, target_state: bool, timeout: float = 120.0) -> bool:
"""
Continuously sample the conductivity (RSSI) and wait for the user-specified "liquid" or "no liquid" state.
Returns as soon as the target state is detected.
Args:
target_state: True = liquid present, False = no liquid
timeout: maximum wait time in seconds
"""
state_str = "有液" if target_state else "无液"
self.logger.info(f"开始实时检测电导率,等待状态: {state_str} (超时: {timeout}s)")
start_time = time.time()
while (time.time() - start_time) < timeout:
res = self.read_level() # 内部已更新 self.level 和 self.rssi
if res:
current_level = res.get('level')
current_rssi = res.get('rssi')
if current_level == target_state:
self.logger.info(f"✅ 检测到目标状态: {state_str} (当前电导率/RSSI: {current_rssi})")
return True
if self.debug:
self.logger.debug(f"当前状态: {'有液' if current_level else '无液'}, RSSI: {current_rssi}")
time.sleep(0.2) # 高频采样
self.logger.warning(f"❌ 等待 {state_str} 状态超时 ({timeout}s)")
return False
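# Illustrative usage: block until the probe reports liquid, sampling roughly every 0.2 s:
#   if sensor.wait_for_liquid(target_state=True, timeout=120.0):
#       ...  # liquid detected within 2 minutes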
def set_threshold(self, threshold: int):
"""设置液位判定阈值"""
self.threshold = int(threshold)
self.logger.info(f"Threshold updated to: {self.threshold}")
def change_device_id(self, new_id: int) -> bool:
"""
修改设备的 Modbus 从站地址。
寄存器: 0x0004, 功能码: 0x06
"""
if not (1 <= new_id <= 254):
self.logger.error(f"Invalid device ID: {new_id}. Must be 1-254.")
return False
self.logger.info(f"Changing device ID from {self.device_id} to {new_id}")
success = self._write_single_register(0x0004, new_id)
if success:
self.device_id = new_id # 更新内存中的地址
self.logger.info(f"Device ID update command sent successfully (target {new_id}).")
return success
def change_baudrate(self, baud_code: int) -> bool:
"""
Change the communication baudrate (register 0x0005).
After a successful change the sensor LED flashes; usually no data is returned.
Baudrate code table (hex):
05: 2400
06: 4800
07: 9600 (默认)
08: 14400
09: 19200
0A: 28800
0C: 57600
0D: 115200
0E: 128000
0F: 256000
"""
self.logger.info(f"Sending baudrate change command (Code: {baud_code:02X})")
# 写入寄存器 0x0005
self._write_single_register(0x0005, baud_code)
self.logger.info("Baudrate change command executed. Device LED should flash. Please update connection settings.")
return True
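# e.g., per the code table above, switching to 115200 baud:
#   sensor.change_baudrate(0x0D)
# then reopen the serial connection at the new baudrate (the sensor sends no reply).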
def factory_reset(self) -> bool:
"""
Restore factory settings (via broadcast address FF).
Resets the address to 01; internally this writes 0x0002 to register 0x0004.
"""
self.logger.info("Sending factory reset command via broadcast address FF...")
# 广播指令通常无回显
self._write_single_register(0x0004, 0x0002, slave_id=0xFF)
self.logger.info("Factory reset command sent. Device address should be 01 now.")
return True
def _write_single_register(self, reg_addr: int, value: int, slave_id: Optional[int] = None) -> bool:
"""内部辅助函数: Modbus 功能码 06 写单个寄存器"""
if not self.transport: return False
target_id = slave_id if slave_id is not None else self.device_id
msg = struct.pack('BBHH', target_id, 0x06, reg_addr, value)
msg += self._crc(msg)
with self.transport.lock:
self.transport.clear_buffer()
if self.debug:
self.logger.info(f"TX Write (Reg {reg_addr:#06x}): {msg.hex().upper()}")
self.transport.write(msg)
# 广播地址、波特率修改或厂家特定指令可能无回显
if target_id == 0xFF or reg_addr == 0x0005:
time.sleep(0.5)
return True
# 等待返回 (正常应返回相同报文)
resp = self.transport.read(len(msg))
if self.debug:
self.logger.info(f"RX Write Response: {resp.hex().upper()}")
return resp == msg
def close(self):
if self.transport:
self.transport.close()
if __name__ == "__main__":
# 快速实例化测试
import logging
# 减少冗余日志,仅显示重要信息
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
# 硬件配置 (根据实际情况修改)
TEST_PORT = "/dev/tty.usbserial-3110"
SLAVE_ID = 1
THRESHOLD = 300
print("\n" + "="*50)
print(f" XKC RS485 传感器独立测试程序")
print(f" 端口: {TEST_PORT} | 地址: {SLAVE_ID} | 阈值: {THRESHOLD}")
print("="*50)
sensor = XKCSensorDriver(port=TEST_PORT, device_id=SLAVE_ID, threshold=THRESHOLD, debug=False)
try:
if sensor.transport:
print(f"\n开始实时连续采样测试 (持续 15 秒)...")
print(f"按 Ctrl+C 可提前停止\n")
start_time = time.time()
duration = 15
count = 0
while time.time() - start_time < duration:
count += 1
res = sensor.read_level()
if res:
rssi = res['rssi']
level = res['level']
status_str = "【有液】" if level else "【无液】"
# 使用 \r 实现单行刷新显示 (或者不刷,直接打印历史)
# 为了方便查看变化,我们直接打印
elapsed = time.time() - start_time
print(f" [{elapsed:4.1f}s] 采样 {count:<3}: 电导率/RSSI = {rssi:<5} | 判定结果: {status_str}")
else:
print(f" [{time.time()-start_time:4.1f}s] 采样 {count:<3}: 通信失败 (无响应)")
time.sleep(0.5) # 每秒采样 2 次
print(f"\n--- 15 秒采样测试完成 (总计 {count} 次) ---")
# [3] 测试动态修改阈值
print(f"\n[3] 动态修改阈值演示...")
new_threshold = 400
sensor.set_threshold(new_threshold)
res = sensor.read_level()
if res:
print(f" 采样 (当前阈值={new_threshold}): 电导率/RSSI = {res['rssi']:<5} | 判定结果: {'【有液】' if res['level'] else '【无液】'}")
sensor.set_threshold(THRESHOLD) # 还原
except KeyboardInterrupt:
print("\n[!] 用户中断测试")
except Exception as e:
print(f"\n[!] 测试运行出错: {e}")
finally:
sensor.close()
print("\n--- 测试程序已退出 ---\n")

View File

@@ -1,687 +0,0 @@
"""
Virtual Workbench Device - simulated workbench
Contains:
- 1 robotic arm (3 s per operation, exclusive lock)
- 3 heating stations (10 s per heating run, can work in parallel)
Workflow:
1. Materials A1-A5 start at the same time and compete for the robotic arm
2. The arm moves each material to an idle heating station
3. After heating finishes, the arm moves the material to C1-C5
Note: calls come from a thread pool; threading.Lock is used for synchronization
"""
"""
import logging
import time
from typing import Dict, Any, Optional
from dataclasses import dataclass
from enum import Enum
from threading import Lock, RLock
from typing_extensions import TypedDict
from unilabos.ros.nodes.base_device_node import BaseROS2DeviceNode
from unilabos.utils.decorator import not_action
# ============ TypedDict 返回类型定义 ============
class MoveToHeatingStationResult(TypedDict):
"""move_to_heating_station 返回类型"""
success: bool
station_id: int
material_id: str
material_number: int
message: str
class StartHeatingResult(TypedDict):
"""start_heating 返回类型"""
success: bool
station_id: int
material_id: str
material_number: int
message: str
class MoveToOutputResult(TypedDict):
"""move_to_output 返回类型"""
success: bool
station_id: int
material_id: str
class PrepareMaterialsResult(TypedDict):
"""prepare_materials 返回类型 - 批量准备物料"""
success: bool
count: int
material_1: int # 物料编号1
material_2: int # 物料编号2
material_3: int # 物料编号3
material_4: int # 物料编号4
material_5: int # 物料编号5
message: str
# ============ 状态枚举 ============
class HeatingStationState(Enum):
"""加热台状态枚举"""
IDLE = "idle" # 空闲
OCCUPIED = "occupied" # 已放置物料,等待加热
HEATING = "heating" # 加热中
COMPLETED = "completed" # 加热完成,等待取走
class ArmState(Enum):
"""机械臂状态枚举"""
IDLE = "idle" # 空闲
BUSY = "busy" # 工作中
@dataclass
class HeatingStation:
"""加热台数据结构"""
station_id: int
state: HeatingStationState = HeatingStationState.IDLE
current_material: Optional[str] = None # 当前物料 (如 "A1", "A2")
material_number: Optional[int] = None # 物料编号 (1-5)
heating_start_time: Optional[float] = None
heating_progress: float = 0.0
class VirtualWorkbench:
"""
Virtual Workbench Device - virtual workbench
Simulates a workstation with 1 robotic arm and 3 heating stations
- Arm operations take 3 seconds; only one operation can run at a time
- Heating takes 10 seconds; the 3 heating stations can work in parallel
Workflow:
1. Materials A1-A5 start concurrently (thread pool) and compete for the arm
2. Once the arm is acquired, find an idle heating station
3. The arm places the material on the station and heating starts
4. After heating finishes, the arm moves the material to its target position C{n}
"""
_ros_node: BaseROS2DeviceNode
# 配置常量
ARM_OPERATION_TIME: float = 3.0 # 机械臂操作时间(秒)
HEATING_TIME: float = 10.0 # 加热时间(秒)
NUM_HEATING_STATIONS: int = 3 # 加热台数量
def __init__(self, device_id: Optional[str] = None, config: Optional[Dict[str, Any]] = None, **kwargs):
# 处理可能的不同调用方式
if device_id is None and "id" in kwargs:
device_id = kwargs.pop("id")
if config is None and "config" in kwargs:
config = kwargs.pop("config")
self.device_id = device_id or "virtual_workbench"
self.config = config or {}
self.logger = logging.getLogger(f"VirtualWorkbench.{self.device_id}")
self.data: Dict[str, Any] = {}
# 从config中获取可配置参数
self.ARM_OPERATION_TIME = float(self.config.get("arm_operation_time", 3.0))
self.HEATING_TIME = float(self.config.get("heating_time", 10.0))
self.NUM_HEATING_STATIONS = int(self.config.get("num_heating_stations", 3))
# 机械臂状态和锁 (使用threading.Lock)
self._arm_lock = Lock()
self._arm_state = ArmState.IDLE
self._arm_current_task: Optional[str] = None
# 加热台状态 (station_id -> HeatingStation) - 立即初始化不依赖initialize()
self._heating_stations: Dict[int, HeatingStation] = {
i: HeatingStation(station_id=i)
for i in range(1, self.NUM_HEATING_STATIONS + 1)
}
self._stations_lock = RLock() # 可重入锁,保护加热台状态
# 任务追踪
self._active_tasks: Dict[str, Dict[str, Any]] = {} # material_id -> task_info
self._tasks_lock = Lock()
# 处理其他kwargs参数
skip_keys = {"arm_operation_time", "heating_time", "num_heating_stations"}
for key, value in kwargs.items():
if key not in skip_keys and not hasattr(self, key):
setattr(self, key, value)
self.logger.info(f"=== 虚拟工作台 {self.device_id} 已创建 ===")
self.logger.info(
f"机械臂操作时间: {self.ARM_OPERATION_TIME}s | "
f"加热时间: {self.HEATING_TIME}s | "
f"加热台数量: {self.NUM_HEATING_STATIONS}"
)
@not_action
def post_init(self, ros_node: BaseROS2DeviceNode):
"""ROS节点初始化后回调"""
self._ros_node = ros_node
@not_action
def initialize(self) -> bool:
"""初始化虚拟工作台"""
self.logger.info(f"初始化虚拟工作台 {self.device_id}")
# 重置加热台状态 (已在__init__中创建这里重置为初始状态)
with self._stations_lock:
for station in self._heating_stations.values():
station.state = HeatingStationState.IDLE
station.current_material = None
station.material_number = None
station.heating_progress = 0.0
# 初始化状态
self.data.update({
"status": "Ready",
"arm_state": ArmState.IDLE.value,
"arm_current_task": None,
"heating_stations": self._get_stations_status(),
"active_tasks_count": 0,
"message": "工作台就绪",
})
self.logger.info(f"工作台初始化完成: {self.NUM_HEATING_STATIONS}个加热台就绪")
return True
@not_action
def cleanup(self) -> bool:
"""清理虚拟工作台"""
self.logger.info(f"清理虚拟工作台 {self.device_id}")
self._arm_state = ArmState.IDLE
self._arm_current_task = None
with self._stations_lock:
self._heating_stations.clear()
with self._tasks_lock:
self._active_tasks.clear()
self.data.update({
"status": "Offline",
"arm_state": ArmState.IDLE.value,
"heating_stations": {},
"message": "工作台已关闭",
})
return True
def _get_stations_status(self) -> Dict[int, Dict[str, Any]]:
"""获取所有加热台状态"""
with self._stations_lock:
return {
station_id: {
"state": station.state.value,
"current_material": station.current_material,
"material_number": station.material_number,
"heating_progress": station.heating_progress,
}
for station_id, station in self._heating_stations.items()
}
def _update_data_status(self, message: Optional[str] = None):
"""更新状态数据"""
self.data.update({
"arm_state": self._arm_state.value,
"arm_current_task": self._arm_current_task,
"heating_stations": self._get_stations_status(),
"active_tasks_count": len(self._active_tasks),
})
if message:
self.data["message"] = message
def _find_available_heating_station(self) -> Optional[int]:
"""查找空闲的加热台
Returns:
空闲加热台ID如果没有则返回None
"""
with self._stations_lock:
for station_id, station in self._heating_stations.items():
if station.state == HeatingStationState.IDLE:
return station_id
return None
def _acquire_arm(self, task_description: str) -> bool:
"""获取机械臂使用权(阻塞直到获取)
Args:
task_description: 任务描述,用于日志
Returns:
是否成功获取
"""
self.logger.info(f"[{task_description}] 等待获取机械臂...")
# 阻塞等待获取锁
self._arm_lock.acquire()
self._arm_state = ArmState.BUSY
self._arm_current_task = task_description
self._update_data_status(f"机械臂执行: {task_description}")
self.logger.info(f"[{task_description}] 成功获取机械臂使用权")
return True
def _release_arm(self):
"""释放机械臂"""
task = self._arm_current_task
self._arm_state = ArmState.IDLE
self._arm_current_task = None
self._arm_lock.release()
self._update_data_status(f"机械臂已释放 (完成: {task})")
self.logger.info(f"机械臂已释放 (完成: {task})")
def prepare_materials(
self,
count: int = 5,
) -> PrepareMaterialsResult:
"""
批量准备物料 - 虚拟起始节点
作为工作流的起始节点,生成指定数量的物料编号供后续节点使用。
输出5个handle (material_1 ~ material_5)分别对应实验1~5。
Args:
count: 待生成的物料数量默认5 (生成 A1-A5)
Returns:
PrepareMaterialsResult: 包含 material_1 ~ material_5 用于传递给 move_to_heating_station
"""
# 生成物料列表 A1 - A{count}
materials = [i for i in range(1, count + 1)]
self.logger.info(
f"[准备物料] 生成 {count} 个物料: "
f"A1-A{count} -> material_1~material_{count}"
)
return {
"success": True,
"count": count,
"material_1": materials[0] if len(materials) > 0 else 0,
"material_2": materials[1] if len(materials) > 1 else 0,
"material_3": materials[2] if len(materials) > 2 else 0,
"material_4": materials[3] if len(materials) > 3 else 0,
"material_5": materials[4] if len(materials) > 4 else 0,
"message": f"已准备 {count} 个物料: A1-A{count}",
}
def move_to_heating_station(
self,
material_number: int,
) -> MoveToHeatingStationResult:
"""
Move a material from position A{n} to a heating station
Concurrent calls compete for the robotic arm and automatically look for an idle heating station
Args:
material_number: material number (1-5)
Returns:
MoveToHeatingStationResult: carries station_id, material_number, etc. to pass to the next node
"""
# 根据物料编号生成物料ID
material_id = f"A{material_number}"
task_desc = f"移动{material_id}到加热台"
self.logger.info(f"[任务] {task_desc} - 开始执行")
# 记录任务
with self._tasks_lock:
self._active_tasks[material_id] = {
"status": "waiting_for_arm",
"start_time": time.time(),
}
try:
# 步骤1: 等待获取机械臂使用权(竞争)
with self._tasks_lock:
self._active_tasks[material_id]["status"] = "waiting_for_arm"
self._acquire_arm(task_desc)
# 步骤2: 查找空闲加热台
with self._tasks_lock:
self._active_tasks[material_id]["status"] = "finding_station"
station_id = None
# 循环等待直到找到空闲加热台
while station_id is None:
station_id = self._find_available_heating_station()
if station_id is None:
self.logger.info(f"[{material_id}] 没有空闲加热台,等待中...")
# 释放机械臂,等待后重试
self._release_arm()
time.sleep(0.5)
self._acquire_arm(task_desc)
# 步骤3: 占用加热台 - 立即标记为OCCUPIED防止其他任务选择同一加热台
with self._stations_lock:
self._heating_stations[station_id].state = HeatingStationState.OCCUPIED
self._heating_stations[station_id].current_material = material_id
self._heating_stations[station_id].material_number = material_number
# 步骤4: 模拟机械臂移动操作 (3秒)
with self._tasks_lock:
self._active_tasks[material_id]["status"] = "arm_moving"
self._active_tasks[material_id]["assigned_station"] = station_id
self.logger.info(f"[{material_id}] 机械臂正在移动到加热台{station_id}...")
time.sleep(self.ARM_OPERATION_TIME)
# 步骤5: 放入加热台完成
self._update_data_status(f"{material_id}已放入加热台{station_id}")
self.logger.info(f"[{material_id}] 已放入加热台{station_id} (用时{self.ARM_OPERATION_TIME}s)")
# 释放机械臂
self._release_arm()
with self._tasks_lock:
self._active_tasks[material_id]["status"] = "placed_on_station"
return {
"success": True,
"station_id": station_id,
"material_id": material_id,
"material_number": material_number,
"message": f"{material_id}已成功移动到加热台{station_id}",
}
except Exception as e:
self.logger.error(f"[{material_id}] 移动失败: {str(e)}")
if self._arm_lock.locked():
self._release_arm()
return {
"success": False,
"station_id": -1,
"material_id": material_id,
"material_number": material_number,
"message": f"移动失败: {str(e)}",
}
def start_heating(
self,
station_id: int,
material_number: int,
) -> StartHeatingResult:
"""
启动指定加热台的加热程序
Args:
station_id: 加热台ID (1-3),从 move_to_heating_station 的 handle 传入
material_number: 物料编号,从 move_to_heating_station 的 handle 传入
Returns:
StartHeatingResult: 包含 station_id, material_number 等用于传递给下一个节点
"""
self.logger.info(f"[加热台{station_id}] 开始加热")
if station_id not in self._heating_stations:
return {
"success": False,
"station_id": station_id,
"material_id": "",
"material_number": material_number,
"message": f"无效的加热台ID: {station_id}",
}
with self._stations_lock:
station = self._heating_stations[station_id]
if station.current_material is None:
return {
"success": False,
"station_id": station_id,
"material_id": "",
"material_number": material_number,
"message": f"加热台{station_id}上没有物料",
}
if station.state == HeatingStationState.HEATING:
return {
"success": False,
"station_id": station_id,
"material_id": station.current_material,
"material_number": material_number,
"message": f"加热台{station_id}已经在加热中",
}
material_id = station.current_material
# 开始加热
station.state = HeatingStationState.HEATING
station.heating_start_time = time.time()
station.heating_progress = 0.0
with self._tasks_lock:
if material_id in self._active_tasks:
self._active_tasks[material_id]["status"] = "heating"
self._update_data_status(f"加热台{station_id}开始加热{material_id}")
# 模拟加热过程 (10秒)
start_time = time.time()
while True:
elapsed = time.time() - start_time
progress = min(100.0, (elapsed / self.HEATING_TIME) * 100)
with self._stations_lock:
self._heating_stations[station_id].heating_progress = progress
self._update_data_status(f"加热台{station_id}加热中: {progress:.1f}%")
if elapsed >= self.HEATING_TIME:
break
time.sleep(1.0)
# 加热完成
with self._stations_lock:
self._heating_stations[station_id].state = HeatingStationState.COMPLETED
self._heating_stations[station_id].heating_progress = 100.0
with self._tasks_lock:
if material_id in self._active_tasks:
self._active_tasks[material_id]["status"] = "heating_completed"
self._update_data_status(f"加热台{station_id}加热完成")
self.logger.info(f"[加热台{station_id}] {material_id}加热完成 (用时{self.HEATING_TIME}s)")
return {
"success": True,
"station_id": station_id,
"material_id": material_id,
"material_number": material_number,
"message": f"加热台{station_id}加热完成",
}
def move_to_output(
self,
station_id: int,
material_number: int,
) -> MoveToOutputResult:
"""
将物料从加热台移动到输出位置Cn
Args:
station_id: 加热台ID (1-3),从 start_heating 的 handle 传入
material_number: 物料编号,从 start_heating 的 handle 传入,用于确定输出位置 Cn
Returns:
MoveToOutputResult: 包含执行结果
"""
output_number = material_number # 物料编号决定输出位置
if station_id not in self._heating_stations:
return {
"success": False,
"station_id": station_id,
"material_id": "",
"output_position": f"C{output_number}",
"message": f"无效的加热台ID: {station_id}",
}
with self._stations_lock:
station = self._heating_stations[station_id]
material_id = station.current_material
if material_id is None:
return {
"success": False,
"station_id": station_id,
"material_id": "",
"output_position": f"C{output_number}",
"message": f"加热台{station_id}上没有物料",
}
if station.state != HeatingStationState.COMPLETED:
return {
"success": False,
"station_id": station_id,
"material_id": material_id,
"output_position": f"C{output_number}",
"message": f"加热台{station_id}尚未完成加热 (当前状态: {station.state.value})",
}
output_position = f"C{output_number}"
task_desc = f"从加热台{station_id}移动{material_id}{output_position}"
self.logger.info(f"[任务] {task_desc}")
try:
with self._tasks_lock:
if material_id in self._active_tasks:
self._active_tasks[material_id]["status"] = "waiting_for_arm_output"
# 获取机械臂
self._acquire_arm(task_desc)
with self._tasks_lock:
if material_id in self._active_tasks:
self._active_tasks[material_id]["status"] = "arm_moving_to_output"
# 模拟机械臂操作 (3秒)
self.logger.info(f"[{material_id}] 机械臂正在从加热台{station_id}取出并移动到{output_position}...")
time.sleep(self.ARM_OPERATION_TIME)
# 清空加热台
with self._stations_lock:
self._heating_stations[station_id].state = HeatingStationState.IDLE
self._heating_stations[station_id].current_material = None
self._heating_stations[station_id].material_number = None
self._heating_stations[station_id].heating_progress = 0.0
self._heating_stations[station_id].heating_start_time = None
# 释放机械臂
self._release_arm()
# 任务完成
with self._tasks_lock:
if material_id in self._active_tasks:
self._active_tasks[material_id]["status"] = "completed"
self._active_tasks[material_id]["end_time"] = time.time()
self._update_data_status(f"{material_id}已移动到{output_position}")
self.logger.info(f"[{material_id}] 已成功移动到{output_position} (用时{self.ARM_OPERATION_TIME}s)")
return {
"success": True,
"station_id": station_id,
"material_id": material_id,
"output_position": output_position,
"message": f"{material_id}已成功移动到{output_position}",
}
except Exception as e:
self.logger.error(f"移动到输出位置失败: {str(e)}")
if self._arm_lock.locked():
self._release_arm()
return {
"success": False,
"station_id": station_id,
"material_id": "",
"output_position": output_position,
"message": f"移动失败: {str(e)}",
}
# ============ 状态属性 ============
@property
def status(self) -> str:
return self.data.get("status", "Unknown")
@property
def arm_state(self) -> str:
return self._arm_state.value
@property
def arm_current_task(self) -> str:
return self._arm_current_task or ""
@property
def heating_station_1_state(self) -> str:
with self._stations_lock:
station = self._heating_stations.get(1)
return station.state.value if station else "unknown"
@property
def heating_station_1_material(self) -> str:
with self._stations_lock:
station = self._heating_stations.get(1)
return station.current_material or "" if station else ""
@property
def heating_station_1_progress(self) -> float:
with self._stations_lock:
station = self._heating_stations.get(1)
return station.heating_progress if station else 0.0
@property
def heating_station_2_state(self) -> str:
with self._stations_lock:
station = self._heating_stations.get(2)
return station.state.value if station else "unknown"
@property
def heating_station_2_material(self) -> str:
with self._stations_lock:
station = self._heating_stations.get(2)
return station.current_material or "" if station else ""
@property
def heating_station_2_progress(self) -> float:
with self._stations_lock:
station = self._heating_stations.get(2)
return station.heating_progress if station else 0.0
@property
def heating_station_3_state(self) -> str:
with self._stations_lock:
station = self._heating_stations.get(3)
return station.state.value if station else "unknown"
@property
def heating_station_3_material(self) -> str:
with self._stations_lock:
station = self._heating_stations.get(3)
return station.current_material or "" if station else ""
@property
def heating_station_3_progress(self) -> float:
with self._stations_lock:
station = self._heating_stations.get(3)
return station.heating_progress if station else 0.0
@property
def active_tasks_count(self) -> int:
with self._tasks_lock:
return len(self._active_tasks)
@property
def message(self) -> str:
return self.data.get("message", "")

View File

@@ -258,7 +258,7 @@ class BioyondResourceSynchronizer(ResourceSynchronizer):
logger.info(f"[同步→Bioyond] 物料不存在于 Bioyond将创建新物料并入库")
# 第1步从配置中获取仓库配置
warehouse_mapping = self.workstation.bioyond_config.get("warehouse_mapping", {})
warehouse_mapping = self.bioyond_config.get("warehouse_mapping", {})
# 确定目标仓库名称
parent_name = None

View File

@@ -317,47 +317,6 @@ separator.chinwe:
- port
type: object
type: UniLabJsonCommand
separation_step:
goal:
max_cycles: 0
motor_id: 5
pulses: 700
speed: 60
timeout: 300
handles: {}
schema:
description: 分液步骤 - 液位传感器与电机联动 (有液→顺时针, 无液→逆时针)
properties:
goal:
properties:
max_cycles:
default: 0
description: 最大循环次数 (0=无限制)
type: integer
motor_id:
default: '5'
description: 选择电机
enum:
- '4'
- '5'
title: '注: 4=搅拌, 5=旋钮'
type: string
pulses:
default: 700
description: 每次旋转脉冲数 (约1/4圈)
type: integer
speed:
default: 60
description: 电机转速 (RPM)
type: integer
timeout:
default: 300
description: 超时时间 (秒)
type: integer
required:
- motor_id
type: object
type: UniLabJsonCommand
wait_sensor_level:
goal:
target_state: 有液

View File

@@ -9284,13 +9284,7 @@ liquid_handler.prcxi:
data_source: handle
data_type: resource
handler_key: input_wells
label: 待设定液体孔
output:
- data_key: wells.@flatten
data_source: executor
data_type: resource
handler_key: output_wells
label: 已设定液体孔
label: InputWells
placeholder_keys:
wells: unilabos_resources
result: {}
@@ -9406,163 +9400,6 @@ liquid_handler.prcxi:
title: LiquidHandlerSetLiquid
type: object
type: LiquidHandlerSetLiquid
set_liquid_from_plate:
feedback: {}
goal: {}
goal_default:
liquid_names: null
plate: null
volumes: null
well_names: null
handles:
input:
- data_key: plate
data_source: handle
data_type: resource
handler_key: input_plate
label: 待设定液体板
output:
- data_key: plate.@flatten
data_source: executor
data_type: resource
handler_key: output_plate
label: 已设定液体板
- data_key: wells.@flatten
data_source: executor
data_type: resource
handler_key: output_wells
label: 已设定液体孔
- data_key: volumes
data_source: executor
data_type: number_array
handler_key: output_volumes
label: 各孔设定体积
placeholder_keys:
plate: unilabos_resources
result: {}
schema:
description: ''
properties:
feedback: {}
goal:
properties:
liquid_names:
items:
type: string
type: array
plate:
properties:
category:
type: string
children:
items:
type: string
type: array
config:
type: string
data:
type: string
id:
type: string
name:
type: string
parent:
type: string
pose:
properties:
orientation:
properties:
w:
type: number
x:
type: number
y:
type: number
z:
type: number
required:
- x
- y
- z
- w
title: orientation
type: object
position:
properties:
x:
type: number
y:
type: number
z:
type: number
required:
- x
- y
- z
title: position
type: object
required:
- position
- orientation
title: pose
type: object
sample_id:
type: string
type:
type: string
required:
- id
- name
- sample_id
- children
- parent
- type
- category
- pose
- config
- data
title: plate
type: object
volumes:
items:
type: number
type: array
well_names:
items:
type: string
type: array
required:
- plate
- well_names
- liquid_names
- volumes
type: object
result:
properties:
plate:
items: {}
title: Plate
type: array
volumes:
items:
type: number
title: Volumes
type: array
wells:
items: {}
title: Wells
type: array
required:
- plate
- wells
- volumes
title: SetLiquidFromPlateReturn
type: object
required:
- goal
title: set_liquid_from_plate参数
type: object
type: UniLabJsonCommand
set_tiprack:
feedback: {}
goal:
@@ -9908,21 +9745,21 @@ liquid_handler.prcxi:
- 0
handles:
input:
- data_key: sources
- data_key: liquid
data_source: handle
data_type: resource
handler_key: sources_identifier
label: 待移动液体
- data_key: targets
data_source: handle
handler_key: sources
label: sources
- data_key: liquid
data_source: executor
data_type: resource
handler_key: targets_identifier
label: 转移目标
- data_key: tip_rack
data_source: handle
handler_key: targets
label: targets
- data_key: liquid
data_source: executor
data_type: resource
handler_key: tip_rack_identifier
label: 墙头盒
handler_key: tip_rack
label: tip_rack
output:
- data_key: liquid
data_source: handle

View File

@@ -1,286 +0,0 @@
motor.zdt_x42:
category:
- motor
class:
action_value_mappings:
auto-enable:
feedback: {}
goal: {}
goal_default:
'on': true
handles: {}
placeholder_keys: {}
result: {}
schema:
description: 使能或禁用电机。使能后电机进入锁轴状态,可接收运动指令;禁用后电机进入松轴状态。
properties:
feedback: {}
goal:
properties:
'on':
default: true
type: boolean
required: []
type: object
result: {}
required:
- goal
title: enable参数
type: object
type: UniLabJsonCommand
auto-get_position:
feedback: {}
goal: {}
goal_default: {}
handles: {}
placeholder_keys: {}
result: {}
schema:
description: 获取当前电机脉冲位置。
properties:
feedback: {}
goal:
properties: {}
required: []
type: object
result:
properties:
position:
type: integer
type: object
required:
- goal
title: get_position参数
type: object
type: UniLabJsonCommand
auto-move_position:
feedback: {}
goal: {}
goal_default:
absolute: false
acceleration: 10
direction: CW
pulses: 1000
speed_rpm: 60
handles: {}
placeholder_keys: {}
result: {}
schema:
description: 位置模式运行。控制电机移动到指定脉冲位置或相对于当前位置移动指定脉冲数。
properties:
feedback: {}
goal:
properties:
absolute:
default: false
type: boolean
acceleration:
default: 10
maximum: 255
minimum: 0
type: integer
direction:
default: CW
enum:
- CW
- CCW
type: string
pulses:
default: 1000
type: integer
speed_rpm:
default: 60
minimum: 0
type: integer
required:
- pulses
- speed_rpm
type: object
result: {}
required:
- goal
title: move_position参数
type: object
type: UniLabJsonCommand
auto-move_speed:
feedback: {}
goal: {}
goal_default:
acceleration: 10
direction: CW
speed_rpm: 60
handles: {}
placeholder_keys: {}
result: {}
schema:
description: 速度模式运行。控制电机以指定转速和方向持续转动。
properties:
feedback: {}
goal:
properties:
acceleration:
default: 10
maximum: 255
minimum: 0
type: integer
direction:
default: CW
enum:
- CW
- CCW
type: string
speed_rpm:
default: 60
minimum: 0
type: integer
required:
- speed_rpm
type: object
result: {}
required:
- goal
title: move_speed参数
type: object
type: UniLabJsonCommand
auto-rotate_quarter:
feedback: {}
goal: {}
goal_default:
direction: CW
speed_rpm: 60
handles: {}
placeholder_keys: {}
result: {}
schema:
description: 电机旋转 1/4 圈 (阻塞式)。
properties:
feedback: {}
goal:
properties:
direction:
default: CW
enum:
- CW
- CCW
type: string
speed_rpm:
default: 60
minimum: 1
type: integer
required: []
type: object
result: {}
required:
- goal
title: rotate_quarter参数
type: object
type: UniLabJsonCommand
auto-set_zero:
feedback: {}
goal: {}
goal_default: {}
handles: {}
placeholder_keys: {}
result: {}
schema:
description: 将当前电机位置设为零点。
properties:
feedback: {}
goal:
properties: {}
required: []
type: object
result: {}
required:
- goal
title: set_zero参数
type: object
type: UniLabJsonCommand
auto-stop:
feedback: {}
goal: {}
goal_default: {}
handles: {}
placeholder_keys: {}
result: {}
schema:
description: 立即停止电机运动。
properties:
feedback: {}
goal:
properties: {}
required: []
type: object
result: {}
required:
- goal
title: stop参数
type: object
type: UniLabJsonCommand
auto-wait_time:
feedback: {}
goal: {}
goal_default:
duration_s: 1.0
handles: {}
placeholder_keys: {}
result: {}
schema:
description: 等待指定时间 (秒)。
properties:
feedback: {}
goal:
properties:
duration_s:
default: 1.0
minimum: 0
type: number
required:
- duration_s
type: object
result: {}
required:
- goal
title: wait_time参数
type: object
type: UniLabJsonCommand
module: unilabos.devices.motor.ZDT_X42:ZDTX42Driver
status_types:
position: int
status: str
type: python
config_info: []
description: ZDT X42 闭环步进电机驱动。支持速度运行、精确位置控制、位置查询和清零功能。适用于各种需要精确运动控制的实验室自动化场景。
handles: []
icon: ''
init_param_schema:
config:
properties:
baudrate:
default: 115200
type: integer
debug:
default: false
type: boolean
device_id:
default: 1
type: integer
port:
type: string
timeout:
default: 0.5
type: number
required:
- port
type: object
data:
properties:
position:
type: integer
status:
type: string
required:
- status
- position
type: object
version: 1.0.0

View File

@@ -1,148 +0,0 @@
sensor.xkc_rs485:
category:
- sensor
- separator
class:
action_value_mappings:
auto-change_baudrate:
goal:
baud_code: 7
handles: {}
schema:
description: '更改通讯波特率 (设置成功后无返回,且需手动切换波特率重连)。代码表 (16进制): 05=2400, 06=4800,
07=9600, 08=14400, 09=19200, 0A=28800, 0C=57600, 0D=115200, 0E=128000,
0F=256000'
properties:
goal:
properties:
baud_code:
description: '波特率代码 (例如: 7 为 9600, 13 即 0x0D 为 115200)'
type: integer
required:
- baud_code
type: object
type: UniLabJsonCommand
auto-change_device_id:
goal:
new_id: 1
handles: {}
schema:
description: 修改传感器的 Modbus 从站地址
properties:
goal:
properties:
new_id:
description: 新的从站地址 (1-254)
maximum: 254
minimum: 1
type: integer
required:
- new_id
type: object
type: UniLabJsonCommand
auto-factory_reset:
goal: {}
handles: {}
schema:
description: 恢复出厂设置 (地址重置为 01)
properties:
goal:
type: object
type: UniLabJsonCommand
auto-read_level:
goal: {}
handles: {}
schema:
description: 直接读取当前液位及信号强度
properties:
goal:
type: object
type: object
type: UniLabJsonCommand
auto-set_threshold:
goal:
threshold: 300
handles: {}
schema:
description: 设置液位判定阈值
properties:
goal:
properties:
threshold:
type: integer
required:
- threshold
type: object
type: UniLabJsonCommand
auto-wait_for_liquid:
goal:
target_state: true
timeout: 120
handles: {}
schema:
description: 实时检测电导率(RSSI)并等待用户指定的状态
properties:
goal:
properties:
target_state:
default: true
description: 目标状态 (True=有液, False=无液)
type: boolean
timeout:
default: 120
description: 超时时间 (秒)
required:
- target_state
type: object
type: UniLabJsonCommand
auto-wait_level:
goal:
level: true
timeout: 10
handles: {}
schema:
description: 等待液位达到目标状态
properties:
goal:
properties:
level:
type: boolean
timeout:
type: number
required:
- level
type: object
type: UniLabJsonCommand
module: unilabos.devices.separator.xkc_sensor:XKCSensorDriver
status_types:
level: bool
rssi: int
type: python
config_info: []
description: XKC RS485 非接触式液位传感器 (Modbus RTU)
handles: []
icon: ''
init_param_schema:
config:
properties:
baudrate:
default: 9600
type: integer
debug:
default: false
type: boolean
device_id:
default: 1
type: integer
port:
type: string
threshold:
default: 300
type: integer
timeout:
default: 3.0
type: number
required:
- port
type: object
version: 1.0.0

View File

@@ -5792,381 +5792,3 @@ virtual_vacuum_pump:
- status
type: object
version: 1.0.0
virtual_workbench:
category:
- virtual_device
class:
action_value_mappings:
auto-move_to_heating_station:
feedback: {}
goal: {}
goal_default:
material_number: null
handles:
input:
- data_key: material_number
data_source: handle
data_type: workbench_material
handler_key: material_input
label: 物料编号
output:
- data_key: station_id
data_source: executor
data_type: workbench_station
handler_key: heating_station_output
label: 加热台ID
- data_key: material_number
data_source: executor
data_type: workbench_material
handler_key: material_number_output
label: 物料编号
placeholder_keys: {}
result: {}
schema:
description: 将物料从An位置移动到空闲加热台返回分配的加热台ID
properties:
feedback: {}
goal:
properties:
material_number:
description: 物料编号1-5物料ID自动生成为A{n}
type: integer
required:
- material_number
type: object
result:
description: move_to_heating_station 返回类型
properties:
material_id:
title: Material Id
type: string
material_number:
title: Material Number
type: integer
message:
title: Message
type: string
station_id:
description: 分配的加热台ID
title: Station Id
type: integer
success:
title: Success
type: boolean
required:
- success
- station_id
- material_id
- material_number
- message
title: MoveToHeatingStationResult
type: object
required:
- goal
title: move_to_heating_station参数
type: object
type: UniLabJsonCommand
auto-move_to_output:
feedback: {}
goal: {}
goal_default:
material_number: null
station_id: null
handles:
input:
- data_key: station_id
data_source: handle
data_type: workbench_station
handler_key: output_station_input
label: 加热台ID
- data_key: material_number
data_source: handle
data_type: workbench_material
handler_key: output_material_input
label: 物料编号
placeholder_keys: {}
result: {}
schema:
description: 将物料从加热台移动到输出位置Cn
properties:
feedback: {}
goal:
properties:
material_number:
description: 物料编号用于确定输出位置Cn
type: integer
station_id:
description: 加热台ID1-3从上一节点传入
type: integer
required:
- station_id
- material_number
type: object
result:
description: move_to_output 返回类型
properties:
material_id:
title: Material Id
type: string
station_id:
title: Station Id
type: integer
success:
title: Success
type: boolean
required:
- success
- station_id
- material_id
title: MoveToOutputResult
type: object
required:
- goal
title: move_to_output参数
type: object
type: UniLabJsonCommand
auto-prepare_materials:
feedback: {}
goal: {}
goal_default:
count: 5
handles:
output:
- data_key: material_1
data_source: executor
data_type: workbench_material
handler_key: channel_1
label: 实验1
- data_key: material_2
data_source: executor
data_type: workbench_material
handler_key: channel_2
label: 实验2
- data_key: material_3
data_source: executor
data_type: workbench_material
handler_key: channel_3
label: 实验3
- data_key: material_4
data_source: executor
data_type: workbench_material
handler_key: channel_4
label: 实验4
- data_key: material_5
data_source: executor
data_type: workbench_material
handler_key: channel_5
label: 实验5
placeholder_keys: {}
result: {}
schema:
description: 批量准备物料 - 虚拟起始节点生成A1-A5物料输出5个handle供后续节点使用
properties:
feedback: {}
goal:
properties:
count:
default: 5
description: 待生成的物料数量默认5 (生成 A1-A5)
type: integer
required: []
type: object
result:
description: prepare_materials 返回类型 - 批量准备物料
properties:
count:
title: Count
type: integer
material_1:
title: Material 1
type: integer
material_2:
title: Material 2
type: integer
material_3:
title: Material 3
type: integer
material_4:
title: Material 4
type: integer
material_5:
title: Material 5
type: integer
message:
title: Message
type: string
success:
title: Success
type: boolean
required:
- success
- count
- material_1
- material_2
- material_3
- material_4
- material_5
- message
title: PrepareMaterialsResult
type: object
required:
- goal
title: prepare_materials参数
type: object
type: UniLabJsonCommand
auto-start_heating:
feedback: {}
goal: {}
goal_default:
material_number: null
station_id: null
handles:
input:
- data_key: station_id
data_source: handle
data_type: workbench_station
handler_key: station_id_input
label: 加热台ID
- data_key: material_number
data_source: handle
data_type: workbench_material
handler_key: material_number_input
label: 物料编号
output:
- data_key: station_id
data_source: executor
data_type: workbench_station
handler_key: heating_done_station
label: 加热完成-加热台ID
- data_key: material_number
data_source: executor
data_type: workbench_material
handler_key: heating_done_material
label: 加热完成-物料编号
placeholder_keys: {}
result: {}
schema:
description: 启动指定加热台的加热程序
properties:
feedback: {}
goal:
properties:
material_number:
description: 物料编号,从上一节点传入
type: integer
station_id:
description: 加热台ID1-3从上一节点传入
type: integer
required:
- station_id
- material_number
type: object
result:
description: start_heating 返回类型
properties:
material_id:
title: Material Id
type: string
material_number:
title: Material Number
type: integer
message:
title: Message
type: string
station_id:
title: Station Id
type: integer
success:
title: Success
type: boolean
required:
- success
- station_id
- material_id
- material_number
- message
title: StartHeatingResult
type: object
required:
- goal
title: start_heating参数
type: object
type: UniLabJsonCommand
module: unilabos.devices.virtual.workbench:VirtualWorkbench
status_types:
active_tasks_count: int
arm_current_task: str
arm_state: str
heating_station_1_material: str
heating_station_1_progress: float
heating_station_1_state: str
heating_station_2_material: str
heating_station_2_progress: float
heating_station_2_state: str
heating_station_3_material: str
heating_station_3_progress: float
heating_station_3_state: str
message: str
status: str
type: python
config_info: []
description: Virtual Workbench with 1 robotic arm and 3 heating stations for concurrent
material processing
handles: []
icon: ''
init_param_schema:
config:
properties:
config:
type: string
device_id:
type: string
required: []
type: object
data:
properties:
active_tasks_count:
type: integer
arm_current_task:
type: string
arm_state:
type: string
heating_station_1_material:
type: string
heating_station_1_progress:
type: number
heating_station_1_state:
type: string
heating_station_2_material:
type: string
heating_station_2_progress:
type: number
heating_station_2_state:
type: string
heating_station_3_material:
type: string
heating_station_3_progress:
type: number
heating_station_3_state:
type: string
message:
type: string
status:
type: string
required:
- status
- arm_state
- arm_current_task
- heating_station_1_state
- heating_station_1_material
- heating_station_1_progress
- heating_station_2_state
- heating_station_2_material
- heating_station_2_progress
- heating_station_3_state
- heating_station_3_material
- heating_station_3_progress
- active_tasks_count
- message
type: object
version: 1.0.0

View File

@@ -4,8 +4,6 @@ import os
import sys
import inspect
import importlib
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from typing import Any, Dict, List, Union, Tuple
@@ -62,7 +60,6 @@ class Registry:
self.device_module_to_registry = {}
self.resource_type_registry = {}
self._setup_called = False # 跟踪setup是否已调用
self._registry_lock = threading.Lock() # 多线程加载时的锁
# 其他状态变量
# self.is_host_mode = False # 移至BasicConfig中
@@ -74,20 +71,6 @@ class Registry:
from unilabos.app.web.utils.action_utils import get_yaml_from_goal_type
# 获取 HostNode 类的增强信息,用于自动生成 action schema
host_node_enhanced_info = get_enhanced_class_info(
"unilabos.ros.nodes.presets.host_node:HostNode", use_dynamic=True
)
# 为 test_latency 生成 schema保留原有 description
test_latency_method_info = host_node_enhanced_info.get("action_methods", {}).get("test_latency", {})
test_latency_schema = self._generate_unilab_json_command_schema(
test_latency_method_info.get("args", []),
"test_latency",
test_latency_method_info.get("return_annotation"),
)
test_latency_schema["description"] = "用于测试延迟的动作,返回延迟时间和时间差。"
self.device_type_registry.update(
{
"host_node": {
@@ -166,22 +149,17 @@ class Registry:
"res_id": "unilabos_resources", # 将当前实验室的全部物料id作为下拉框可选择
"device_id": "unilabos_devices", # 将当前实验室的全部设备id作为下拉框可选择
"parent": "unilabos_nodes", # 将当前实验室的设备/物料作为下拉框可选择
"class_name": "unilabos_class",
},
},
"test_latency": {
"type": (
"UniLabJsonCommandAsync"
if test_latency_method_info.get("is_async", False)
else "UniLabJsonCommand"
),
"type": self.EmptyIn,
"goal": {},
"feedback": {},
"result": {},
"schema": test_latency_schema,
"goal_default": {
arg["name"]: arg["default"] for arg in test_latency_method_info.get("args", [])
},
"schema": ros_action_to_json_schema(
self.EmptyIn, "用于测试延迟的动作,返回延迟时间和时间差。"
),
"goal_default": {},
"handles": {},
},
"auto-test_resource": {
@@ -264,115 +242,67 @@ class Registry:
# 标记setup已被调用
self._setup_called = True
def _load_single_resource_file(
self, file: Path, complete_registry: bool, upload_registry: bool
) -> Tuple[Dict[str, Any], Dict[str, Any], bool]:
"""
加载单个资源文件 (线程安全)
Returns:
(data, complete_data, is_valid): 资源数据, 完整数据, 是否有效
"""
try:
with open(file, encoding="utf-8", mode="r") as f:
data = yaml.safe_load(io.StringIO(f.read()))
except Exception as e:
logger.warning(f"[UniLab Registry] 读取资源文件失败: {file}, 错误: {e}")
return {}, {}, False
if not data:
return {}, {}, False
complete_data = {}
for resource_id, resource_info in data.items():
if "version" not in resource_info:
resource_info["version"] = "1.0.0"
if "category" not in resource_info:
resource_info["category"] = [file.stem]
elif file.stem not in resource_info["category"]:
resource_info["category"].append(file.stem)
elif not isinstance(resource_info.get("category"), list):
resource_info["category"] = [resource_info["category"]]
if "config_info" not in resource_info:
resource_info["config_info"] = []
if "icon" not in resource_info:
resource_info["icon"] = ""
if "handles" not in resource_info:
resource_info["handles"] = []
if "init_param_schema" not in resource_info:
resource_info["init_param_schema"] = {}
if "config_info" in resource_info:
del resource_info["config_info"]
if "file_path" in resource_info:
del resource_info["file_path"]
complete_data[resource_id] = copy.deepcopy(dict(sorted(resource_info.items())))
if upload_registry:
class_info = resource_info.get("class", {})
if len(class_info) and "module" in class_info:
if class_info.get("type") == "pylabrobot":
res_class = get_class(class_info["module"])
if callable(res_class) and not isinstance(res_class, type):
res_instance = res_class(res_class.__name__)
res_ulr = tree_to_list([resource_plr_to_ulab(res_instance)])
resource_info["config_info"] = res_ulr
resource_info["registry_type"] = "resource"
resource_info["file_path"] = str(file.absolute()).replace("\\", "/")
complete_data = dict(sorted(complete_data.items()))
complete_data = copy.deepcopy(complete_data)
if complete_registry:
try:
with open(file, "w", encoding="utf-8") as f:
yaml.dump(complete_data, f, allow_unicode=True, default_flow_style=False, Dumper=NoAliasDumper)
except Exception as e:
logger.warning(f"[UniLab Registry] 写入资源文件失败: {file}, 错误: {e}")
return data, complete_data, True
def load_resource_types(self, path: os.PathLike, complete_registry: bool, upload_registry: bool):
abs_path = Path(path).absolute()
resource_path = abs_path / "resources"
files = list(resource_path.glob("*/*.yaml"))
logger.debug(f"[UniLab Registry] resources: {resource_path.exists()}, total: {len(files)}")
if not files:
return
# 使用线程池并行加载
max_workers = min(8, len(files))
results = []
with ThreadPoolExecutor(max_workers=max_workers) as executor:
future_to_file = {
executor.submit(self._load_single_resource_file, file, complete_registry, upload_registry): file
for file in files
}
for future in as_completed(future_to_file):
file = future_to_file[future]
try:
data, complete_data, is_valid = future.result()
if is_valid:
results.append((file, data))
except Exception as e:
logger.warning(f"[UniLab Registry] 处理资源文件异常: {file}, 错误: {e}")
# 线程安全地更新注册表
logger.trace(f"[UniLab Registry] load resources? {resource_path.exists()}, total: {len(files)}")
current_resource_number = len(self.resource_type_registry) + 1
with self._registry_lock:
for i, (file, data) in enumerate(results):
for i, file in enumerate(files):
with open(file, encoding="utf-8", mode="r") as f:
data = yaml.safe_load(io.StringIO(f.read()))
complete_data = {}
if data:
# 为每个资源添加文件路径信息
for resource_id, resource_info in data.items():
if "version" not in resource_info:
resource_info["version"] = "1.0.0"
if "category" not in resource_info:
resource_info["category"] = [file.stem]
elif file.stem not in resource_info["category"]:
resource_info["category"].append(file.stem)
elif not isinstance(resource_info.get("category"), list):
resource_info["category"] = [resource_info["category"]]
if "config_info" not in resource_info:
resource_info["config_info"] = []
if "icon" not in resource_info:
resource_info["icon"] = ""
if "handles" not in resource_info:
resource_info["handles"] = []
if "init_param_schema" not in resource_info:
resource_info["init_param_schema"] = {}
if "config_info" in resource_info:
del resource_info["config_info"]
if "file_path" in resource_info:
del resource_info["file_path"]
complete_data[resource_id] = copy.deepcopy(dict(sorted(resource_info.items())))
if upload_registry:
class_info = resource_info.get("class", {})
if len(class_info) and "module" in class_info:
if class_info.get("type") == "pylabrobot":
res_class = get_class(class_info["module"])
if callable(res_class) and not isinstance(
res_class, type
): # 有的是类,有的是函数,这里暂时只登记函数类的
res_instance = res_class(res_class.__name__)
res_ulr = tree_to_list([resource_plr_to_ulab(res_instance)])
resource_info["config_info"] = res_ulr
resource_info["registry_type"] = "resource"
resource_info["file_path"] = str(file.absolute()).replace("\\", "/")
complete_data = dict(sorted(complete_data.items()))
complete_data = copy.deepcopy(complete_data)
if complete_registry:
with open(file, "w", encoding="utf-8") as f:
yaml.dump(complete_data, f, allow_unicode=True, default_flow_style=False, Dumper=NoAliasDumper)
self.resource_type_registry.update(data)
logger.trace(
f"[UniLab Registry] Resource-{current_resource_number} File-{i+1}/{len(results)} "
logger.trace( # type: ignore
f"[UniLab Registry] Resource-{current_resource_number} File-{i+1}/{len(files)} "
+ f"Add {list(data.keys())}"
)
current_resource_number += 1
# 记录无效文件
valid_files = {r[0] for r in results}
for file in files:
if file not in valid_files:
logger.debug(f"[UniLab Registry] Res File Not Valid YAML File: {file.absolute()}")
else:
logger.debug(f"[UniLab Registry] Res File-{i+1}/{len(files)} Not Valid YAML File: {file.absolute()}")
def _extract_class_docstrings(self, module_string: str) -> Dict[str, str]:
"""
@@ -610,9 +540,11 @@ class Registry:
return final_schema
def _preserve_field_descriptions(self, new_schema: Dict[str, Any], previous_schema: Dict[str, Any]) -> None:
def _preserve_field_descriptions(
self, new_schema: Dict[str, Any], previous_schema: Dict[str, Any]
) -> None:
"""
保留之前 schema 中 goal/feedback/result 下一级字段的 description 和 title
保留之前 schema 中 goal/feedback/result 下一级字段的 description
Args:
new_schema: 新生成的 schema会被修改
@@ -634,9 +566,6 @@ class Registry:
# 保留字段的 description
if "description" in prev_field and prev_field["description"]:
field_schema["description"] = prev_field["description"]
# 保留字段的 title用户自定义的中文名
if "title" in prev_field and prev_field["title"]:
field_schema["title"] = prev_field["title"]
def _is_typed_dict(self, annotation: Any) -> bool:
"""
@@ -724,244 +653,213 @@ class Registry:
"handles": {},
}
def _load_single_device_file(
self, file: Path, complete_registry: bool, get_yaml_from_goal_type
) -> Tuple[Dict[str, Any], Dict[str, Any], bool, List[str]]:
"""
加载单个设备文件 (线程安全)
Returns:
(data, complete_data, is_valid, device_ids): 设备数据, 完整数据, 是否有效, 设备ID列表
"""
try:
with open(file, encoding="utf-8", mode="r") as f:
data = yaml.safe_load(io.StringIO(f.read()))
except Exception as e:
logger.warning(f"[UniLab Registry] 读取设备文件失败: {file}, 错误: {e}")
return {}, {}, False, []
if not data:
return {}, {}, False, []
complete_data = {}
action_str_type_mapping = {
"UniLabJsonCommand": "UniLabJsonCommand",
"UniLabJsonCommandAsync": "UniLabJsonCommandAsync",
}
status_str_type_mapping = {}
device_ids = []
for device_id, device_config in data.items():
if "version" not in device_config:
device_config["version"] = "1.0.0"
if "category" not in device_config:
device_config["category"] = [file.stem]
elif file.stem not in device_config["category"]:
device_config["category"].append(file.stem)
if "config_info" not in device_config:
device_config["config_info"] = []
if "description" not in device_config:
device_config["description"] = ""
if "icon" not in device_config:
device_config["icon"] = ""
if "handles" not in device_config:
device_config["handles"] = []
if "init_param_schema" not in device_config:
device_config["init_param_schema"] = {}
if "class" in device_config:
if "status_types" not in device_config["class"] or device_config["class"]["status_types"] is None:
device_config["class"]["status_types"] = {}
if (
"action_value_mappings" not in device_config["class"]
or device_config["class"]["action_value_mappings"] is None
):
device_config["class"]["action_value_mappings"] = {}
enhanced_info = {}
if complete_registry:
device_config["class"]["status_types"].clear()
enhanced_info = get_enhanced_class_info(device_config["class"]["module"], use_dynamic=True)
if not enhanced_info.get("dynamic_import_success", False):
continue
device_config["class"]["status_types"].update(
{k: v["return_type"] for k, v in enhanced_info["status_methods"].items()}
)
for status_name, status_type in device_config["class"]["status_types"].items():
if isinstance(status_type, tuple) or status_type in ["Any", "None", "Unknown"]:
status_type = "String"
device_config["class"]["status_types"][status_name] = status_type
try:
target_type = self._replace_type_with_class(status_type, device_id, f"状态 {status_name}")
except ROSMsgNotFound:
continue
if target_type in [dict, list]:
target_type = String
status_str_type_mapping[status_type] = target_type
device_config["class"]["status_types"] = dict(sorted(device_config["class"]["status_types"].items()))
if complete_registry:
old_action_configs = {}
for action_name, action_config in device_config["class"]["action_value_mappings"].items():
old_action_configs[action_name] = action_config
device_config["class"]["action_value_mappings"] = {
k: v
for k, v in device_config["class"]["action_value_mappings"].items()
if not k.startswith("auto-")
}
device_config["class"]["action_value_mappings"].update(
{
f"auto-{k}": {
"type": "UniLabJsonCommandAsync" if v["is_async"] else "UniLabJsonCommand",
"goal": {},
"feedback": {},
"result": {},
"schema": self._generate_unilab_json_command_schema(
v["args"],
k,
v.get("return_annotation"),
old_action_configs.get(f"auto-{k}", {}).get("schema"),
),
"goal_default": {i["name"]: i["default"] for i in v["args"]},
"handles": old_action_configs.get(f"auto-{k}", {}).get("handles", []),
"placeholder_keys": {
i["name"]: (
"unilabos_resources"
if i["type"] == "unilabos.registry.placeholder_type:ResourceSlot"
or i["type"] == ("list", "unilabos.registry.placeholder_type:ResourceSlot")
else "unilabos_devices"
)
for i in v["args"]
if i.get("type", "")
in [
"unilabos.registry.placeholder_type:ResourceSlot",
"unilabos.registry.placeholder_type:DeviceSlot",
("list", "unilabos.registry.placeholder_type:ResourceSlot"),
("list", "unilabos.registry.placeholder_type:DeviceSlot"),
]
},
}
for k, v in enhanced_info["action_methods"].items()
if k not in device_config["class"]["action_value_mappings"]
}
)
for action_name, old_config in old_action_configs.items():
if action_name in device_config["class"]["action_value_mappings"]:
old_schema = old_config.get("schema", {})
if "description" in old_schema and old_schema["description"]:
device_config["class"]["action_value_mappings"][action_name]["schema"][
"description"
] = old_schema["description"]
device_config["init_param_schema"] = {}
device_config["init_param_schema"]["config"] = self._generate_unilab_json_command_schema(
enhanced_info["init_params"], "__init__"
)["properties"]["goal"]
device_config["init_param_schema"]["data"] = self._generate_status_types_schema(
enhanced_info["status_methods"]
)
device_config.pop("schema", None)
device_config["class"]["action_value_mappings"] = dict(
sorted(device_config["class"]["action_value_mappings"].items())
)
for action_name, action_config in device_config["class"]["action_value_mappings"].items():
if "handles" not in action_config:
action_config["handles"] = {}
elif isinstance(action_config["handles"], list):
if len(action_config["handles"]):
logger.error(f"设备{device_id} {action_name} 的handles配置错误应该是字典类型")
continue
else:
action_config["handles"] = {}
if "type" in action_config:
action_type_str: str = action_config["type"]
if not action_type_str.startswith("UniLabJsonCommand"):
try:
target_type = self._replace_type_with_class(
action_type_str, device_id, f"动作 {action_name}"
)
except ROSMsgNotFound:
continue
action_str_type_mapping[action_type_str] = target_type
if target_type is not None:
action_config["goal_default"] = yaml.safe_load(
io.StringIO(get_yaml_from_goal_type(target_type.Goal))
)
action_config["schema"] = ros_action_to_json_schema(target_type)
else:
logger.warning(
f"[UniLab Registry] 设备 {device_id} 的动作 {action_name} 类型为空,跳过替换"
)
complete_data[device_id] = copy.deepcopy(dict(sorted(device_config.items())))
for status_name, status_type in device_config["class"]["status_types"].items():
device_config["class"]["status_types"][status_name] = status_str_type_mapping[status_type]
for action_name, action_config in device_config["class"]["action_value_mappings"].items():
if action_config["type"] not in action_str_type_mapping:
continue
action_config["type"] = action_str_type_mapping[action_config["type"]]
self._add_builtin_actions(device_config, device_id)
device_config["file_path"] = str(file.absolute()).replace("\\", "/")
device_config["registry_type"] = "device"
device_ids.append(device_id)
complete_data = dict(sorted(complete_data.items()))
complete_data = copy.deepcopy(complete_data)
try:
with open(file, "w", encoding="utf-8") as f:
yaml.dump(complete_data, f, allow_unicode=True, default_flow_style=False, Dumper=NoAliasDumper)
except Exception as e:
logger.warning(f"[UniLab Registry] 写入设备文件失败: {file}, 错误: {e}")
return data, complete_data, True, device_ids
def load_device_types(self, path: os.PathLike, complete_registry: bool):
# return
abs_path = Path(path).absolute()
devices_path = abs_path / "devices"
device_comms_path = abs_path / "device_comms"
files = list(devices_path.glob("*.yaml")) + list(device_comms_path.glob("*.yaml"))
logger.trace(
logger.trace( # type: ignore
f"[UniLab Registry] devices: {devices_path.exists()}, device_comms: {device_comms_path.exists()}, "
+ f"total: {len(files)}"
)
if not files:
return
current_device_number = len(self.device_type_registry) + 1
from unilabos.app.web.utils.action_utils import get_yaml_from_goal_type
# 使用线程池并行加载
max_workers = min(8, len(files))
results = []
with ThreadPoolExecutor(max_workers=max_workers) as executor:
future_to_file = {
executor.submit(self._load_single_device_file, file, complete_registry, get_yaml_from_goal_type): file
for file in files
for i, file in enumerate(files):
with open(file, encoding="utf-8", mode="r") as f:
data = yaml.safe_load(io.StringIO(f.read()))
complete_data = {}
action_str_type_mapping = {
"UniLabJsonCommand": "UniLabJsonCommand",
"UniLabJsonCommandAsync": "UniLabJsonCommandAsync",
}
for future in as_completed(future_to_file):
file = future_to_file[future]
try:
data, complete_data, is_valid, device_ids = future.result()
if is_valid:
results.append((file, data, device_ids))
except Exception as e:
logger.warning(f"[UniLab Registry] 处理设备文件异常: {file}, 错误: {e}")
status_str_type_mapping = {}
if data:
# 在添加到注册表前处理类型替换
for device_id, device_config in data.items():
# 添加文件路径信息 - 使用规范化的完整文件路径
if "version" not in device_config:
device_config["version"] = "1.0.0"
if "category" not in device_config:
device_config["category"] = [file.stem]
elif file.stem not in device_config["category"]:
device_config["category"].append(file.stem)
if "config_info" not in device_config:
device_config["config_info"] = []
if "description" not in device_config:
device_config["description"] = ""
if "icon" not in device_config:
device_config["icon"] = ""
if "handles" not in device_config:
device_config["handles"] = []
if "init_param_schema" not in device_config:
device_config["init_param_schema"] = {}
if "class" in device_config:
if (
"status_types" not in device_config["class"]
or device_config["class"]["status_types"] is None
):
device_config["class"]["status_types"] = {}
if (
"action_value_mappings" not in device_config["class"]
or device_config["class"]["action_value_mappings"] is None
):
device_config["class"]["action_value_mappings"] = {}
enhanced_info = {}
if complete_registry:
device_config["class"]["status_types"].clear()
enhanced_info = get_enhanced_class_info(device_config["class"]["module"], use_dynamic=True)
if not enhanced_info.get("dynamic_import_success", False):
continue
device_config["class"]["status_types"].update(
{k: v["return_type"] for k, v in enhanced_info["status_methods"].items()}
)
for status_name, status_type in device_config["class"]["status_types"].items():
if isinstance(status_type, tuple) or status_type in ["Any", "None", "Unknown"]:
status_type = "String" # 替换成ROS的String便于显示
device_config["class"]["status_types"][status_name] = status_type
try:
target_type = self._replace_type_with_class(
status_type, device_id, f"状态 {status_name}"
)
except ROSMsgNotFound:
continue
if target_type in [
dict,
list,
]: # 对于嵌套类型返回的对象,暂时处理成字符串,无法直接进行转换
target_type = String
status_str_type_mapping[status_type] = target_type
device_config["class"]["status_types"] = dict(
sorted(device_config["class"]["status_types"].items())
)
if complete_registry:
# 保存原有的 action 配置(用于保留 schema 的 description 和 handles 等)
old_action_configs = {}
for action_name, action_config in device_config["class"]["action_value_mappings"].items():
old_action_configs[action_name] = action_config
# 线程安全地更新注册表
current_device_number = len(self.device_type_registry) + 1
with self._registry_lock:
for file, data, device_ids in results:
self.device_type_registry.update(data)
for device_id in device_ids:
logger.trace(
f"[UniLab Registry] Device-{current_device_number} Add {device_id} "
device_config["class"]["action_value_mappings"] = {
k: v
for k, v in device_config["class"]["action_value_mappings"].items()
if not k.startswith("auto-")
}
# 处理动作值映射
device_config["class"]["action_value_mappings"].update(
{
f"auto-{k}": {
"type": "UniLabJsonCommandAsync" if v["is_async"] else "UniLabJsonCommand",
"goal": {},
"feedback": {},
"result": {},
"schema": self._generate_unilab_json_command_schema(
v["args"],
k,
v.get("return_annotation"),
# 传入旧的 schema 以保留字段 description
old_action_configs.get(f"auto-{k}", {}).get("schema"),
),
"goal_default": {i["name"]: i["default"] for i in v["args"]},
# 保留原有的 handles 配置
"handles": old_action_configs.get(f"auto-{k}", {}).get("handles", []),
"placeholder_keys": {
i["name"]: (
"unilabos_resources"
if i["type"] == "unilabos.registry.placeholder_type:ResourceSlot"
or i["type"]
== ("list", "unilabos.registry.placeholder_type:ResourceSlot")
else "unilabos_devices"
)
for i in v["args"]
if i.get("type", "")
in [
"unilabos.registry.placeholder_type:ResourceSlot",
"unilabos.registry.placeholder_type:DeviceSlot",
("list", "unilabos.registry.placeholder_type:ResourceSlot"),
("list", "unilabos.registry.placeholder_type:DeviceSlot"),
]
},
}
# 不生成已配置action的动作
for k, v in enhanced_info["action_methods"].items()
if k not in device_config["class"]["action_value_mappings"]
}
)
# 恢复原有的 description 信息(非 auto- 开头的动作)
for action_name, old_config in old_action_configs.items():
if action_name in device_config["class"]["action_value_mappings"]: # 有一些会被删除
old_schema = old_config.get("schema", {})
if "description" in old_schema and old_schema["description"]:
device_config["class"]["action_value_mappings"][action_name]["schema"][
"description"
] = old_schema["description"]
device_config["init_param_schema"] = {}
device_config["init_param_schema"]["config"] = self._generate_unilab_json_command_schema(
enhanced_info["init_params"], "__init__"
)["properties"]["goal"]
device_config["init_param_schema"]["data"] = self._generate_status_types_schema(
enhanced_info["status_methods"]
)
device_config.pop("schema", None)
device_config["class"]["action_value_mappings"] = dict(
sorted(device_config["class"]["action_value_mappings"].items())
)
for action_name, action_config in device_config["class"]["action_value_mappings"].items():
if "handles" not in action_config:
action_config["handles"] = {}
elif isinstance(action_config["handles"], list):
if len(action_config["handles"]):
logger.error(f"设备{device_id} {action_name} 的handles配置错误应该是字典类型")
continue
else:
action_config["handles"] = {}
if "type" in action_config:
action_type_str: str = action_config["type"]
# 通过Json发放指令而不是通过特殊的ros action进行处理
if not action_type_str.startswith("UniLabJsonCommand"):
try:
target_type = self._replace_type_with_class(
action_type_str, device_id, f"动作 {action_name}"
)
except ROSMsgNotFound:
continue
action_str_type_mapping[action_type_str] = target_type
if target_type is not None:
action_config["goal_default"] = yaml.safe_load(
io.StringIO(get_yaml_from_goal_type(target_type.Goal))
)
action_config["schema"] = ros_action_to_json_schema(target_type)
else:
logger.warning(
f"[UniLab Registry] 设备 {device_id} 的动作 {action_name} 类型为空,跳过替换"
)
complete_data[device_id] = copy.deepcopy(dict(sorted(device_config.items()))) # 稍后dump到文件
for status_name, status_type in device_config["class"]["status_types"].items():
device_config["class"]["status_types"][status_name] = status_str_type_mapping[status_type]
for action_name, action_config in device_config["class"]["action_value_mappings"].items():
if action_config["type"] not in action_str_type_mapping:
continue
action_config["type"] = action_str_type_mapping[action_config["type"]]
# 添加内置的驱动命令动作
self._add_builtin_actions(device_config, device_id)
device_config["file_path"] = str(file.absolute()).replace("\\", "/")
device_config["registry_type"] = "device"
logger.trace( # type: ignore
f"[UniLab Registry] Device-{current_device_number} File-{i+1}/{len(files)} Add {device_id} "
+ f"[{data[device_id].get('name', '未命名设备')}]"
)
current_device_number += 1
# 记录无效文件
valid_files = {r[0] for r in results}
for file in files:
if file not in valid_files:
logger.debug(f"[UniLab Registry] Device File Not Valid YAML File: {file.absolute()}")
complete_data = dict(sorted(complete_data.items()))
complete_data = copy.deepcopy(complete_data)
with open(file, "w", encoding="utf-8") as f:
yaml.dump(complete_data, f, allow_unicode=True, default_flow_style=False, Dumper=NoAliasDumper)
self.device_type_registry.update(data)
else:
logger.debug(
f"[UniLab Registry] Device File-{i+1}/{len(files)} Not Valid YAML File: {file.absolute()}"
)
def obtain_registry_device_info(self):
devices = []
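The registry changes above revert the parallel resource/device loading back to a sequential loop. For context, a generic sketch of the thread-pool pattern that is being removed, using PyYAML and the standard library only (the function names are illustrative, not the project's `Registry` API):

```python
# Generic sketch: parse YAML files in a thread pool, then merge results under a lock.
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from typing import Any, Dict

import yaml

def _load_one(path: Path) -> Dict[str, Any]:
    with open(path, encoding="utf-8") as f:
        return yaml.safe_load(f) or {}

def load_registry_files(files: list[Path]) -> Dict[str, Any]:
    registry: Dict[str, Any] = {}
    lock = threading.Lock()
    with ThreadPoolExecutor(max_workers=min(8, len(files) or 1)) as pool:
        futures = {pool.submit(_load_one, f): f for f in files}
        for future in as_completed(futures):
            path = futures[future]
            try:
                data = future.result()
            except Exception as exc:  # unreadable or invalid YAML
                print(f"skip {path}: {exc}")
                continue
            with lock:  # merge one result at a time
                registry.update(data)
    return registry
```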

View File

@@ -46,16 +46,3 @@ BIOYOND_PolymerStation_8StockCarrier:
init_param_schema: {}
registry_type: resource
version: 1.0.0
BIOYOND_PolymerStation_TipBox:
category:
- bottle_carriers
- tip_racks
class:
module: unilabos.resources.bioyond.bottle_carriers:BIOYOND_PolymerStation_TipBox
type: pylabrobot
description: BIOYOND_PolymerStation_TipBox (4x6布局24个枪头孔位)
handles: []
icon: ''
init_param_schema: {}
registry_type: resource
version: 1.0.0

View File

@@ -82,3 +82,14 @@ BIOYOND_PolymerStation_Solution_Beaker:
icon: ''
init_param_schema: {}
version: 1.0.0
BIOYOND_PolymerStation_TipBox:
category:
- bottles
- tip_boxes
class:
module: unilabos.resources.bioyond.bottles:BIOYOND_PolymerStation_TipBox
type: pylabrobot
handles: []
icon: ''
init_param_schema: {}
version: 1.0.0

View File

@@ -1,4 +1,4 @@
from pylabrobot.resources import create_homogeneous_resources, Coordinate, ResourceHolder, create_ordered_items_2d, Container
from pylabrobot.resources import create_homogeneous_resources, Coordinate, ResourceHolder, create_ordered_items_2d
from unilabos.resources.itemized_carrier import BottleCarrier
from unilabos.resources.bioyond.bottles import (
@@ -9,28 +9,6 @@ from unilabos.resources.bioyond.bottles import (
BIOYOND_PolymerStation_Reagent_Bottle,
BIOYOND_PolymerStation_Flask,
)
def BIOYOND_PolymerStation_Tip(name: str, size_x: float = 8.0, size_y: float = 8.0, size_z: float = 50.0) -> Container:
"""创建单个枪头资源
Args:
name: 枪头名称
size_x: 枪头宽度 (mm)
size_y: 枪头长度 (mm)
size_z: 枪头高度 (mm)
Returns:
Container: 枪头容器
"""
return Container(
name=name,
size_x=size_x,
size_y=size_y,
size_z=size_z,
category="tip",
model="BIOYOND_PolymerStation_Tip",
)
# 命名约定:试剂瓶-Bottle烧杯-Beaker烧瓶-Flask,小瓶-Vial
@@ -344,88 +322,3 @@ def BIOYOND_Electrolyte_1BottleCarrier(name: str) -> BottleCarrier:
carrier.num_items_z = 1
carrier[0] = BIOYOND_PolymerStation_Solution_Beaker(f"{name}_beaker_1")
return carrier
def BIOYOND_PolymerStation_TipBox(
name: str,
size_x: float = 127.76, # 枪头盒宽度
size_y: float = 85.48, # 枪头盒长度
size_z: float = 100.0, # 枪头盒高度
barcode: str = None,
) -> BottleCarrier:
"""创建4×6枪头盒 (24个枪头) - 使用 BottleCarrier 结构
Args:
name: 枪头盒名称
size_x: 枪头盒宽度 (mm)
size_y: 枪头盒长度 (mm)
size_z: 枪头盒高度 (mm)
barcode: 条形码
Returns:
BottleCarrier: 包含24个枪头孔位的枪头盒载架
布局说明:
- 4行×6列 (A-D, 1-6)
- 枪头孔位间距: 18mm (x方向) × 18mm (y方向)
- 起始位置居中对齐
- 索引顺序: 列优先 (0=A1, 1=B1, 2=C1, 3=D1, 4=A2, ...)
"""
# 枪头孔位参数
num_cols = 6 # 1-6 (x方向)
num_rows = 4 # A-D (y方向)
tip_diameter = 8.0 # 枪头孔位直径
tip_spacing_x = 18.0 # 列间距 (增加到18mm更宽松)
tip_spacing_y = 18.0 # 行间距 (增加到18mm更宽松)
# 计算起始位置 (居中对齐)
total_width = (num_cols - 1) * tip_spacing_x + tip_diameter
total_height = (num_rows - 1) * tip_spacing_y + tip_diameter
start_x = (size_x - total_width) / 2
start_y = (size_y - total_height) / 2
# 使用 create_ordered_items_2d 创建孔位
# create_ordered_items_2d 返回的 key 是数字索引: 0, 1, 2, ...
# 顺序是列优先: 先y后x (即 0=A1, 1=B1, 2=C1, 3=D1, 4=A2, 5=B2, ...)
sites = create_ordered_items_2d(
klass=ResourceHolder,
num_items_x=num_cols,
num_items_y=num_rows,
dx=start_x,
dy=start_y,
dz=5.0,
item_dx=tip_spacing_x,
item_dy=tip_spacing_y,
size_x=tip_diameter,
size_y=tip_diameter,
size_z=50.0, # 枪头深度
)
# 更新 sites 中每个 ResourceHolder 的名称
for k, v in sites.items():
v.name = f"{name}_{v.name}"
# 创建枪头盒载架
# 注意:不设置 category使用默认的 "bottle_carrier",这样前端会显示为完整的矩形载架
tip_box = BottleCarrier(
name=name,
size_x=size_x,
size_y=size_y,
size_z=size_z,
sites=sites, # 直接使用数字索引的 sites
model="BIOYOND_PolymerStation_TipBox",
)
# 设置自定义属性
tip_box.barcode = barcode
tip_box.tip_count = 24 # 4行×6列
tip_box.num_items_x = num_cols
tip_box.num_items_y = num_rows
tip_box.num_items_z = 1
# ⭐ 枪头盒不需要放入子资源
# 与其他 carrier 不同,枪头盒在 Bioyond 中是一个整体
# 不需要追踪每个枪头的状态,保持为空的 ResourceHolder 即可
# 这样前端会显示24个空槽位可以用于放置枪头
return tip_box
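The removed carrier-based tip box documents a column-major index order (0=A1, 1=B1, 2=C1, 3=D1, 4=A2, ...). A small stand-alone sketch of that convention, independent of pylabrobot:

```python
# Column-major (y-then-x) index convention for a 4x6 tip box: 0=A1, 1=B1, 2=C1, 3=D1, 4=A2, ...
NUM_ROWS, NUM_COLS = 4, 6  # rows A-D, columns 1-6

def index_to_well(index: int) -> str:
    if not 0 <= index < NUM_ROWS * NUM_COLS:
        raise IndexError(index)
    col, row = divmod(index, NUM_ROWS)  # column-major: rows vary fastest
    return f"{chr(ord('A') + row)}{col + 1}"

assert index_to_well(0) == "A1"
assert index_to_well(3) == "D1"
assert index_to_well(4) == "A2"
assert index_to_well(23) == "D6"
```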

View File

@@ -116,9 +116,7 @@ def BIOYOND_PolymerStation_TipBox(
size_z: float = 100.0, # 枪头盒高度
barcode: str = None,
):
"""创建4×6枪头盒 (24个枪头) - 使用 BottleCarrier 结构
注意:此函数已弃用,请使用 bottle_carriers.py 中的版本
"""创建4×6枪头盒 (24个枪头)
Args:
name: 枪头盒名称
@@ -128,11 +126,55 @@ def BIOYOND_PolymerStation_TipBox(
barcode: 条形码
Returns:
BottleCarrier: 包含24个枪头孔位的枪头盒载架
TipBoxCarrier: 包含24个枪头孔位的枪头盒
"""
# 重定向到 bottle_carriers.py 中的实现
from unilabos.resources.bioyond.bottle_carriers import BIOYOND_PolymerStation_TipBox as TipBox_Carrier
return TipBox_Carrier(name=name, size_x=size_x, size_y=size_y, size_z=size_z, barcode=barcode)
from pylabrobot.resources import Container, Coordinate
# 创建枪头盒容器
tip_box = Container(
name=name,
size_x=size_x,
size_y=size_y,
size_z=size_z,
category="tip_rack",
model="BIOYOND_PolymerStation_TipBox_4x6",
)
# 设置自定义属性
tip_box.barcode = barcode
tip_box.tip_count = 24 # 4行×6列
tip_box.num_items_x = 6 # 6列
tip_box.num_items_y = 4 # 4行
# 创建24个枪头孔位 (4行×6列)
# 假设孔位间距为 9mm
tip_spacing_x = 9.0 # 列间距
tip_spacing_y = 9.0 # 行间距
start_x = 14.38 # 第一个孔位的x偏移
start_y = 11.24 # 第一个孔位的y偏移
for row in range(4): # A, B, C, D
for col in range(6): # 1-6
spot_name = f"{chr(65 + row)}{col + 1}" # A1, A2, ..., D6
x = start_x + col * tip_spacing_x
y = start_y + row * tip_spacing_y
# 创建枪头孔位容器
tip_spot = Container(
name=spot_name,
size_x=8.0, # 单个枪头孔位大小
size_y=8.0,
size_z=size_z - 10.0, # 略低于盒子高度
category="tip_spot",
)
# 添加到枪头盒
tip_box.assign_child_resource(
tip_spot,
location=Coordinate(x=x, y=y, z=0)
)
return tip_box
def BIOYOND_PolymerStation_Flask(
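The restored implementation above lays the 24 tip spots out with fixed offsets and 9 mm pitch. A minimal sketch of just that layout arithmetic, with the numbers taken from the hunk (coordinates in mm; the helper itself is illustrative):

```python
# Compute (name, x, y) for a 4x6 tip-spot grid, mirroring the loop above.
TIP_SPACING_X = 9.0   # column pitch (mm)
TIP_SPACING_Y = 9.0   # row pitch (mm)
START_X, START_Y = 14.38, 11.24  # offset of spot A1 (mm)

def tip_spot_layout() -> list[tuple[str, float, float]]:
    spots = []
    for row in range(4):       # rows A..D
        for col in range(6):   # columns 1..6
            name = f"{chr(65 + row)}{col + 1}"
            spots.append((name,
                          START_X + col * TIP_SPACING_X,
                          START_Y + row * TIP_SPACING_Y))
    return spots

layout = tip_spot_layout()
assert layout[0] == ("A1", 14.38, 11.24)
assert layout[-1][0] == "D6"
```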

View File

@@ -260,7 +260,7 @@ def read_node_link_json(
resource_tree_set = canonicalize_nodes_data(nodes)
# 标准化边数据
links = data.get("links", data.get("edges", []))
links = data.get("links", [])
standardized_links = canonicalize_links_ports(links, resource_tree_set)
# 构建 NetworkX 图(需要转换回 dict 格式)
@@ -597,8 +597,6 @@ def resource_plr_to_ulab(resource_plr: "ResourcePLR", parent_name: str = None, w
"tube": "tube",
"bottle_carrier": "bottle_carrier",
"plate_adapter": "plate_adapter",
"electrode_sheet": "electrode_sheet",
"material_hole": "material_hole",
}
if source in replace_info:
return replace_info[source]
@@ -759,12 +757,9 @@ def resource_bioyond_to_plr(bioyond_materials: list[dict], type_mapping: Dict[st
bottle = plr_material[number] = initialize_resource(
{"name": f'{detail["name"]}_{number}', "class": reverse_type_mapping[typeName][0]}, resource_type=ResourcePLR
)
# 只有具有 tracker 属性的容器才设置液体信息(如 Bottle, Well
# ResourceHolder 等不支持液体追踪的容器跳过
if hasattr(bottle, "tracker"):
bottle.tracker.liquids = [
(detail["name"], float(detail.get("quantity", 0)) if detail.get("quantity") else 0)
]
bottle.tracker.liquids = [
(detail["name"], float(detail.get("quantity", 0)) if detail.get("quantity") else 0)
]
bottle.code = detail.get("code", "")
logger.debug(f" └─ [子物料] {detail['name']}{plr_material.name}[{number}] (类型:{typeName})")
else:
@@ -773,11 +768,9 @@ def resource_bioyond_to_plr(bioyond_materials: list[dict], type_mapping: Dict[st
# 只对有 capacity 属性的容器(液体容器)处理液体追踪
if hasattr(plr_material, 'capacity'):
bottle = plr_material[0] if plr_material.capacity > 0 else plr_material
# 确保 bottletracker 属性才设置液体信息
if hasattr(bottle, "tracker"):
bottle.tracker.liquids = [
(material["name"], float(material.get("quantity", 0)) if material.get("quantity") else 0)
]
bottle.tracker.liquids = [
(material["name"], float(material.get("quantity", 0)) if material.get("quantity") else 0)
]
plr_materials.append(plr_material)
@@ -806,29 +799,24 @@ def resource_bioyond_to_plr(bioyond_materials: list[dict], type_mapping: Dict[st
wh_name = loc.get("whName")
logger.debug(f"[物料位置] {unique_name} 尝试放置到 warehouse: {wh_name} (Bioyond坐标: x={loc.get('x')}, y={loc.get('y')}, z={loc.get('z')})")
# Bioyond坐标映射 (重要!): x→行(1=A,2=B...), y→列(1=01,2=02...), z→层(通常=1)
# 必须在warehouse映射之前先获取坐标以便后续调整
x = loc.get("x", 1) # 行号 (1-based: 1=A, 2=B, 3=C, 4=D)
y = loc.get("y", 1) # 列号 (1-based: 1=01, 2=02, 3=03...)
z = loc.get("z", 1) # 层号 (1-based, 通常为1)
# 特殊处理: Bioyond的"堆栈1"需要映射到"堆栈1左"或"堆栈1右"
# 根据列号(y)判断: 1-4映射到左侧, 5-8映射到右侧
# 根据列号(x)判断: 1-4映射到左侧, 5-8映射到右侧
if wh_name == "堆栈1":
if 1 <= y <= 4:
x_val = loc.get("x", 1)
if 1 <= x_val <= 4:
wh_name = "堆栈1左"
elif 5 <= y <= 8:
elif 5 <= x_val <= 8:
wh_name = "堆栈1右"
y = y - 4 # 调整列号: 5-8映射到1-4
else:
logger.warning(f"物料 {material['name']} 的列号 y={y} 超出范围无法映射到堆栈1左或堆栈1右")
logger.warning(f"物料 {material['name']} 的列号 x={x_val} 超出范围无法映射到堆栈1左或堆栈1右")
continue
# 特殊处理: Bioyond的"站内Tip盒堆栈"也需要进行拆分映射
if wh_name == "站内Tip盒堆栈":
if y == 1:
y_val = loc.get("y", 1)
if y_val == 1:
wh_name = "站内Tip盒堆栈(右)"
elif y in [2, 3]:
elif y_val in [2, 3]:
wh_name = "站内Tip盒堆栈(左)"
y = y - 1 # 调整列号,因为左侧仓库对应的 Bioyond y=2 实际上是它的第1列
@@ -836,6 +824,15 @@ def resource_bioyond_to_plr(bioyond_materials: list[dict], type_mapping: Dict[st
warehouse = deck.warehouses[wh_name]
logger.debug(f"[Warehouse匹配] 找到warehouse: {wh_name} (容量: {warehouse.capacity}, 行×列: {warehouse.num_items_x}×{warehouse.num_items_y})")
# Bioyond坐标映射 (重要!): x→行(1=A,2=B...), y→列(1=01,2=02...), z→层(通常=1)
x = loc.get("x", 1) # 行号 (1-based: 1=A, 2=B, 3=C, 4=D)
y = loc.get("y", 1) # 列号 (1-based: 1=01, 2=02, 3=03...)
z = loc.get("z", 1) # 层号 (1-based, 通常为1)
# 如果是右侧堆栈,需要调整列号 (5→1, 6→2, 7→3, 8→4)
if wh_name == "堆栈1右":
y = y - 4 # 将5-8映射到1-4
# 特殊处理竖向warehouse站内试剂存放堆栈、测量小瓶仓库
# 这些warehouse使用 vertical-col-major 布局
if wh_name in ["站内试剂存放堆栈", "测量小瓶仓库(测密度)"]:
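The comments in the hunk above describe the Bioyond location convention: x is the 1-based row (1=A, 2=B, ...), y the 1-based column, z the layer (usually 1). A tiny helper that turns such a triple into a human-readable slot label; the label format itself is an assumption for illustration, not Bioyond's:

```python
# Illustrative only: render a Bioyond (x, y, z) location as a slot label.
def slot_label(x: int, y: int, z: int = 1) -> str:
    """x: 1-based row (1=A), y: 1-based column, z: layer (usually 1)."""
    row_letter = chr(ord("A") + x - 1)
    label = f"{row_letter}{y:02d}"
    return label if z == 1 else f"{label}-L{z}"

assert slot_label(1, 1) == "A01"
assert slot_label(4, 3) == "D03"
assert slot_label(2, 5, z=2) == "B05-L2"
```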

View File

@@ -18,9 +18,3 @@ def register():
from unilabos.devices.liquid_handling.rviz_backend import UniLiquidHandlerRvizBackend
from unilabos.devices.liquid_handling.laiyu.backend.laiyu_v_backend import UniLiquidHandlerLaiyuBackend
# noinspection PyUnresolvedReferences
from unilabos.resources.bioyond.decks import (
BIOYOND_PolymerReactionStation_Deck,
BIOYOND_PolymerPreparationStation_Deck,
BIOYOND_YB_Deck,
)

View File

@@ -13,9 +13,6 @@ if TYPE_CHECKING:
from pylabrobot.resources import Resource as PLRResource
EXTRA_CLASS = "unilabos_resource_class"
class ResourceDictPositionSize(BaseModel):
depth: float = Field(description="Depth", default=0.0) # z
width: float = Field(description="Width", default=0.0) # x
@@ -341,7 +338,6 @@ class ResourceTreeSet(object):
"deck": "deck",
"tip_rack": "tip_rack",
"tip_spot": "tip_spot",
"tip": "tip", # 添加 tip 类型支持
"tube": "tube",
"bottle_carrier": "bottle_carrier",
}
@@ -397,7 +393,7 @@ class ResourceTreeSet(object):
"parent": parent_resource, # 直接传入 ResourceDict 对象
"parent_uuid": parent_uuid, # 使用 parent_uuid 而不是 parent 对象
"type": replace_plr_type(d.get("category", "")),
"class": extra.get(EXTRA_CLASS, ""),
"class": d.get("class", ""),
"position": pos,
"pose": pos,
"config": {
@@ -447,7 +443,7 @@ class ResourceTreeSet(object):
trees.append(tree_instance)
return cls(trees)
def to_plr_resources(self, skip_devices=True) -> List["PLRResource"]:
def to_plr_resources(self) -> List["PLRResource"]:
"""
将 ResourceTreeSet 转换为 PLR 资源列表
@@ -472,7 +468,6 @@ class ResourceTreeSet(object):
name_to_uuid[node.res_content.name] = node.res_content.uuid
all_states[node.res_content.name] = node.res_content.data
name_to_extra[node.res_content.name] = node.res_content.extra
name_to_extra[node.res_content.name][EXTRA_CLASS] = node.res_content.klass
for child in node.children:
collect_node_data(child, name_to_uuid, all_states, name_to_extra)
@@ -517,10 +512,7 @@ class ResourceTreeSet(object):
plr_dict = node_to_plr_dict(tree.root_node, has_model)
try:
sub_cls = find_subclass(plr_dict["type"], PLRResource)
if skip_devices and plr_dict["type"] == "device":
logger.info(f"跳过更新 {plr_dict['name']} 设备是class")
continue
elif sub_cls is None:
if sub_cls is None:
raise ValueError(
f"无法找到类型 {plr_dict['type']} 对应的 PLR 资源类。原始信息:{tree.root_node.res_content}"
)
@@ -528,10 +520,6 @@ class ResourceTreeSet(object):
if "category" not in spec.parameters:
plr_dict.pop("category", None)
plr_resource = sub_cls.deserialize(plr_dict, allow_marshal=True)
from pylabrobot.resources import Coordinate
from pylabrobot.serializer import deserialize
location = cast(Coordinate, deserialize(plr_dict["location"]))
plr_resource.location = location
plr_resource.load_all_state(all_states)
# 使用 DeviceNodeResourceTracker 设置 UUID 和 Extra
tracker.loop_set_uuid(plr_resource, name_to_uuid)
@@ -998,7 +986,7 @@ class DeviceNodeResourceTracker(object):
extra = name_to_extra_map[resource_name]
self.set_resource_extra(res, extra)
if len(extra):
logger.trace(f"设置资源Extra: {resource_name} -> {extra}")
logger.debug(f"设置资源Extra: {resource_name} -> {extra}")
return 1
return 0
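The traversal above collects per-name lookup tables (uuid, state, extra) for every node before deserializing. A self-contained sketch of that collection pattern, using a hypothetical `Node` type rather than the project's resource-tree classes:

```python
# Sketch of the recursive collection pattern, with a hypothetical Node type.
from dataclasses import dataclass, field
from typing import Any, Dict, List

@dataclass
class Node:
    name: str
    uuid: str
    data: Dict[str, Any] = field(default_factory=dict)
    children: List["Node"] = field(default_factory=list)

def collect(node: Node,
            name_to_uuid: Dict[str, str],
            all_states: Dict[str, Dict[str, Any]]) -> None:
    name_to_uuid[node.name] = node.uuid
    all_states[node.name] = node.data
    for child in node.children:
        collect(child, name_to_uuid, all_states)

root = Node("deck", "u-1", children=[Node("plate_1", "u-2", {"volume": 10.0})])
uuids: Dict[str, str] = {}
states: Dict[str, Dict[str, Any]] = {}
collect(root, uuids, states)
assert uuids == {"deck": "u-1", "plate_1": "u-2"}
```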

View File

@@ -770,16 +770,13 @@ def ros_message_to_json_schema(msg_class: Any, field_name: str) -> Dict[str, Any
return schema
def ros_action_to_json_schema(
action_class: Any, description="", previous_schema: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
def ros_action_to_json_schema(action_class: Any, description="") -> Dict[str, Any]:
"""
将 ROS Action 类转换为 JSON Schema
Args:
action_class: ROS Action 类
description: 描述
previous_schema: 之前的 schema用于保留 goal/feedback/result 下一级字段的 description
Returns:
完整的 JSON Schema 定义
@@ -813,44 +810,9 @@ def ros_action_to_json_schema(
"required": ["goal"],
}
# 保留之前 schema 中 goal/feedback/result 下一级字段的 description
if previous_schema:
_preserve_field_descriptions(schema, previous_schema)
return schema
def _preserve_field_descriptions(
new_schema: Dict[str, Any], previous_schema: Dict[str, Any]
) -> None:
"""
保留之前 schema 中 goal/feedback/result 下一级字段的 description 和 title
Args:
new_schema: 新生成的 schema会被修改
previous_schema: 之前的 schema
"""
for section in ["goal", "feedback", "result"]:
new_section = new_schema.get("properties", {}).get(section, {})
prev_section = previous_schema.get("properties", {}).get(section, {})
if not new_section or not prev_section:
continue
new_props = new_section.get("properties", {})
prev_props = prev_section.get("properties", {})
for field_name, field_schema in new_props.items():
if field_name in prev_props:
prev_field = prev_props[field_name]
# 保留字段的 description
if "description" in prev_field and prev_field["description"]:
field_schema["description"] = prev_field["description"]
# 保留字段的 title用户自定义的中文名
if "title" in prev_field and prev_field["title"]:
field_schema["title"] = prev_field["title"]
def convert_ros_action_to_jsonschema(
action_name_or_type: Union[str, Type], output_file: Optional[str] = None, format: str = "json"
) -> Dict[str, Any]:
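The helper removed above copies field `description`/`title` values from a previously generated schema into a freshly regenerated one, so hand-written annotations survive regeneration. A generic version over plain dicts, with no ROS dependency (the function name mirrors the removed helper but is otherwise illustrative):

```python
# Copy "description" and "title" for goal/feedback/result fields from a previous
# JSON schema into a newly generated one (modifies new_schema in place).
from typing import Any, Dict

def preserve_field_descriptions(new_schema: Dict[str, Any],
                                previous_schema: Dict[str, Any]) -> None:
    for section in ("goal", "feedback", "result"):
        new_props = new_schema.get("properties", {}).get(section, {}).get("properties", {})
        prev_props = previous_schema.get("properties", {}).get(section, {}).get("properties", {})
        for name, field_schema in new_props.items():
            prev = prev_props.get(name, {})
            for key in ("description", "title"):
                if prev.get(key):
                    field_schema[key] = prev[key]
```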

View File

@@ -49,6 +49,7 @@ from unilabos.resources.resource_tracker import (
ResourceTreeInstance,
ResourceDictInstance,
)
from unilabos.ros.x.rclpyx import get_event_loop
from unilabos.ros.utils.driver_creator import WorkstationNodeCreator, PyLabRobotCreator, DeviceClassCreator
from rclpy.task import Task, Future
from unilabos.utils.import_manager import default_manager
@@ -184,7 +185,7 @@ class PropertyPublisher:
f"创建发布者 {name} 失败,可能由于注册表有误,类型: {msg_type},错误: {ex}\n{traceback.format_exc()}"
)
self.timer = node.create_timer(self.timer_period, self.publish_property)
self.__loop = ROS2DeviceNode.get_asyncio_loop()
self.__loop = get_event_loop()
str_msg_type = str(msg_type)[8:-2]
self.node.lab_logger().trace(f"发布属性: {name}, 类型: {str_msg_type}, 周期: {initial_period}秒, QoS: {qos}")
@@ -884,9 +885,6 @@ class BaseROS2DeviceNode(Node, Generic[T]):
parent_appended = True
# 加载状态
original_instance.location = plr_resource.location
original_instance.rotation = plr_resource.rotation
original_instance.barcode = plr_resource.barcode
original_instance.load_all_state(states)
child_count = len(original_instance.get_all_children())
self.lab_logger().info(
@@ -1322,32 +1320,19 @@ class BaseROS2DeviceNode(Node, Generic[T]):
resource_inputs = action_kwargs[k] if is_sequence else [action_kwargs[k]]
# 批量查询资源
queried_resources: list = [None] * len(resource_inputs)
uuid_indices: list[tuple[int, str, dict]] = [] # (index, uuid, resource_data)
# 第一遍处理没有uuid的资源收集有uuid的资源信息
for idx, resource_data in enumerate(resource_inputs):
queried_resources = []
for resource_data in resource_inputs:
unilabos_uuid = resource_data.get("data", {}).get("unilabos_uuid")
if unilabos_uuid is None:
plr_resource = await self.get_resource_with_dir(
resource_id=resource_data["id"], with_children=True
)
if "sample_id" in resource_data:
plr_resource.unilabos_extra["sample_uuid"] = resource_data["sample_id"]
queried_resources[idx] = plr_resource
else:
uuid_indices.append((idx, unilabos_uuid, resource_data))
# 第二遍批量查询有uuid的资源
if uuid_indices:
uuids = [item[1] for item in uuid_indices]
resource_tree = await self.get_resource(uuids)
plr_resources = resource_tree.to_plr_resources()
for i, (idx, _, resource_data) in enumerate(uuid_indices):
plr_resource = plr_resources[i]
if "sample_id" in resource_data:
plr_resource.unilabos_extra["sample_uuid"] = resource_data["sample_id"]
queried_resources[idx] = plr_resource
resource_tree = await self.get_resource([unilabos_uuid])
plr_resource = resource_tree.to_plr_resources()[0]
if "sample_id" in resource_data:
plr_resource.unilabos_extra["sample_uuid"] = resource_data["sample_id"]
queried_resources.append(plr_resource)
self.lab_logger().debug(f"资源查询结果: 共 {len(queried_resources)} 个资源")
@@ -1772,15 +1757,6 @@ class ROS2DeviceNode:
它不继承设备类,而是通过代理模式访问设备类的属性和方法。
"""
# 类变量,用于循环管理
_asyncio_loop = None
_asyncio_loop_running = False
_asyncio_loop_thread = None
@classmethod
def get_asyncio_loop(cls):
return cls._asyncio_loop
@staticmethod
async def safe_task_wrapper(trace_callback, func, **kwargs):
try:
@@ -1857,11 +1833,6 @@ class ROS2DeviceNode:
print_publish: 是否打印发布信息
driver_is_ros:
"""
# 在初始化时检查循环状态
if ROS2DeviceNode._asyncio_loop_running and ROS2DeviceNode._asyncio_loop_thread is not None:
pass
elif ROS2DeviceNode._asyncio_loop_thread is None:
self._start_loop()
# 保存设备类是否支持异步上下文
self._has_async_context = hasattr(driver_class, "__aenter__") and hasattr(driver_class, "__aexit__")
@@ -1953,17 +1924,6 @@ class ROS2DeviceNode:
except Exception as e:
self._ros_node.lab_logger().error(f"设备后初始化失败: {e}")
def _start_loop(self):
def run_event_loop():
loop = asyncio.new_event_loop()
ROS2DeviceNode._asyncio_loop = loop
asyncio.set_event_loop(loop)
loop.run_forever()
ROS2DeviceNode._asyncio_loop_thread = threading.Thread(target=run_event_loop, daemon=True, name="ROS2DeviceNode")
ROS2DeviceNode._asyncio_loop_thread.start()
logger.info(f"循环线程已启动")
class DeviceInfoType(TypedDict):
id: str
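This commit moves the per-device asyncio loop management out of `ROS2DeviceNode` (in favour of `rclpyx.get_event_loop`). For context, a stand-alone sketch of the underlying pattern: run one event loop in a daemon thread and hand coroutines to it from synchronous code. The thread name and `read_status` coroutine are illustrative only:

```python
# Background asyncio loop in a daemon thread + run_coroutine_threadsafe.
import asyncio
import threading

_loop = asyncio.new_event_loop()

def _run() -> None:
    asyncio.set_event_loop(_loop)
    _loop.run_forever()

threading.Thread(target=_run, daemon=True, name="device-loop").start()

async def read_status(device_id: str) -> str:
    await asyncio.sleep(0.1)          # placeholder for real async I/O
    return f"{device_id}: idle"

future = asyncio.run_coroutine_threadsafe(read_status("heater_1"), _loop)
print(future.result(timeout=2.0))     # "heater_1: idle"
```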

View File

@@ -5,8 +5,7 @@ import threading
import time
import traceback
import uuid
from typing import TYPE_CHECKING, Optional, Dict, Any, List, ClassVar, Set, Union
from typing_extensions import TypedDict
from typing import TYPE_CHECKING, Optional, Dict, Any, List, ClassVar, Set, TypedDict, Union
from action_msgs.msg import GoalStatus
from geometry_msgs.msg import Point
@@ -63,18 +62,6 @@ class TestResourceReturn(TypedDict):
devices: List[DeviceSlot]
class TestLatencyReturn(TypedDict):
"""test_latency方法的返回值类型"""
avg_rtt_ms: float
avg_time_diff_ms: float
max_time_error_ms: float
task_delay_ms: float
raw_delay_ms: float
test_count: int
status: str
class HostNode(BaseROS2DeviceNode):
"""
主机节点类,负责管理设备、资源和控制器
@@ -808,7 +795,6 @@ class HostNode(BaseROS2DeviceNode):
goal_msg = convert_to_ros_msg(action_client._action_type.Goal(), action_kwargs)
self.lab_logger().info(f"[Host Node] Sending goal for {action_id}: {str(goal_msg)[:1000]}")
self.lab_logger().trace(f"[Host Node] Sending goal for {action_id}: {action_kwargs}")
self.lab_logger().trace(f"[Host Node] Sending goal for {action_id}: {goal_msg}")
action_client.wait_for_server()
goal_uuid_obj = UUID(uuid=list(u.bytes))
@@ -867,13 +853,8 @@ class HostNode(BaseROS2DeviceNode):
# 适配后端的一些额外处理
return_value = return_info.get("return_value")
if isinstance(return_value, dict):
unilabos_samples = return_value.pop("unilabos_samples", None)
if isinstance(unilabos_samples, list) and unilabos_samples:
self.lab_logger().info(
f"[Host Node] Job {job_id[:8]} returned {len(unilabos_samples)} sample(s): "
f"{[s.get('name', s.get('id', 'unknown')) if isinstance(s, dict) else str(s)[:20] for s in unilabos_samples[:5]]}"
f"{'...' if len(unilabos_samples) > 5 else ''}"
)
unilabos_samples = return_info.get("unilabos_samples")
if isinstance(unilabos_samples, list):
return_info["unilabos_samples"] = unilabos_samples
suc = return_info.get("suc", False)
if not suc:
@@ -900,7 +881,7 @@ class HostNode(BaseROS2DeviceNode):
# 清理 _goals 中的记录
if job_id in self._goals:
del self._goals[job_id]
self.lab_logger().trace(f"[Host Node] Removed goal {job_id[:8]} from _goals")
self.lab_logger().debug(f"[Host Node] Removed goal {job_id[:8]} from _goals")
# 存储结果供 HTTP API 查询
try:
@@ -1345,20 +1326,10 @@ class HostNode(BaseROS2DeviceNode):
self.lab_logger().debug(f"[Host Node-Resource] List parameters: {request}")
return response
def test_latency(self) -> TestLatencyReturn:
def test_latency(self):
"""
测试网络延迟的action实现
通过5次ping-pong机制校对时间误差并计算实际延迟
Returns:
TestLatencyReturn: 包含延迟测试结果的字典,包括:
- avg_rtt_ms: 平均往返时间(毫秒)
- avg_time_diff_ms: 平均时间差(毫秒)
- max_time_error_ms: 最大时间误差(毫秒)
- task_delay_ms: 实际任务延迟(毫秒),-1表示无法计算
- raw_delay_ms: 原始时间差(毫秒),-1表示无法计算
- test_count: 有效测试次数
- status: 测试状态,"success"表示成功,"all_timeout"表示全部超时
"""
import uuid as uuid_module
@@ -1421,15 +1392,7 @@ class HostNode(BaseROS2DeviceNode):
if not ping_results:
self.lab_logger().error("❌ 所有ping-pong测试都失败了")
return {
"avg_rtt_ms": -1.0,
"avg_time_diff_ms": -1.0,
"max_time_error_ms": -1.0,
"task_delay_ms": -1.0,
"raw_delay_ms": -1.0,
"test_count": 0,
"status": "all_timeout",
}
return {"status": "all_timeout"}
# 统计分析
rtts = [r["rtt_ms"] for r in ping_results]
@@ -1437,7 +1400,7 @@ class HostNode(BaseROS2DeviceNode):
avg_rtt_ms = sum(rtts) / len(rtts)
avg_time_diff_ms = sum(time_diffs) / len(time_diffs)
max_time_diff_error_ms: float = max(abs(min(time_diffs)), abs(max(time_diffs)))
max_time_diff_error_ms = max(abs(min(time_diffs)), abs(max(time_diffs)))
self.lab_logger().info("-" * 50)
self.lab_logger().info("[测试统计]")
@@ -1477,7 +1440,7 @@ class HostNode(BaseROS2DeviceNode):
self.lab_logger().info("=" * 60)
res: TestLatencyReturn = {
return {
"avg_rtt_ms": avg_rtt_ms,
"avg_time_diff_ms": avg_time_diff_ms,
"max_time_error_ms": max_time_diff_error_ms,
@@ -1488,14 +1451,9 @@ class HostNode(BaseROS2DeviceNode):
"test_count": len(ping_results),
"status": "success",
}
return res
def test_resource(
self,
resource: ResourceSlot = None,
resources: List[ResourceSlot] = None,
device: DeviceSlot = None,
devices: List[DeviceSlot] = None,
self, resource: ResourceSlot = None, resources: List[ResourceSlot] = None, device: DeviceSlot = None, devices: List[DeviceSlot] = None
) -> TestResourceReturn:
if resources is None:
resources = []
@@ -1556,9 +1514,7 @@ class HostNode(BaseROS2DeviceNode):
# 构建服务地址
srv_address = f"/srv{namespace}/s2c_resource_tree"
self.lab_logger().trace(
f"[Host Node-Resource] Host -> {device_id} ResourceTree {action} operation started -------"
)
self.lab_logger().trace(f"[Host Node-Resource] Host -> {device_id} ResourceTree {action} operation started -------")
# 创建服务客户端
sclient = self.create_client(SerialCommand, srv_address)
@@ -1593,9 +1549,7 @@ class HostNode(BaseROS2DeviceNode):
time.sleep(0.05)
response = future.result()
self.lab_logger().trace(
f"[Host Node-Resource] Host -> {device_id} ResourceTree {action} operation completed -------"
)
self.lab_logger().trace(f"[Host Node-Resource] Host -> {device_id} ResourceTree {action} operation completed -------")
return True
except Exception as e:

View File

@@ -6,6 +6,8 @@ from typing import List, Dict, Any, Optional, TYPE_CHECKING
import rclpy
from rosidl_runtime_py import message_to_ordereddict
from unilabos_msgs.msg import Resource
from unilabos_msgs.srv import ResourceUpdate
from unilabos.messages import * # type: ignore # protocol names
from rclpy.action import ActionServer, ActionClient
@@ -13,6 +15,7 @@ from rclpy.action.server import ServerGoalHandle
from unilabos_msgs.srv._serial_command import SerialCommand_Request, SerialCommand_Response
from unilabos.compile import action_protocol_generators
from unilabos.resources.graphio import nested_dict_to_list
from unilabos.ros.initialize_device import initialize_device_from_dict
from unilabos.ros.msgs.message_converter import (
get_action_type,
@@ -228,15 +231,15 @@ class ROS2WorkstationNode(BaseROS2DeviceNode):
try:
# 统一处理单个或多个资源
resource_id = (
protocol_kwargs[k]["id"]
if v == "unilabos_msgs/Resource"
else protocol_kwargs[k][0]["id"]
protocol_kwargs[k]["id"] if v == "unilabos_msgs/Resource" else protocol_kwargs[k][0]["id"]
)
resource_uuid = protocol_kwargs[k].get("uuid", None)
r = SerialCommand_Request()
r.command = json.dumps({"id": resource_id, "uuid": resource_uuid, "with_children": True})
# 发送请求并等待响应
response: SerialCommand_Response = await self._resource_clients["resource_get"].call_async(
response: SerialCommand_Response = await self._resource_clients[
"resource_get"
].call_async(
r
) # type: ignore
raw_data = json.loads(response.response)
@@ -304,52 +307,12 @@ class ROS2WorkstationNode(BaseROS2DeviceNode):
# 向Host更新物料当前状态
for k, v in goal.get_fields_and_field_types().items():
if v not in ["unilabos_msgs/Resource", "sequence<unilabos_msgs/Resource>"]:
continue
self.lab_logger().info(f"更新资源状态: {k}")
try:
# 去重:使用 seen 集合获取唯一的资源对象
seen = set()
unique_resources = []
# 获取资源数据,统一转换为列表
resource_data = protocol_kwargs[k]
is_sequence = v != "unilabos_msgs/Resource"
if not is_sequence:
resource_list = [resource_data] if isinstance(resource_data, dict) else resource_data
else:
# 处理序列类型,可能是嵌套列表
resource_list = []
if isinstance(resource_data, list):
for item in resource_data:
if isinstance(item, list):
resource_list.extend(item)
else:
resource_list.append(item)
else:
resource_list = [resource_data]
for res_data in resource_list:
if not isinstance(res_data, dict):
continue
res_name = res_data.get("id") or res_data.get("name")
if not res_name:
continue
# 使用 resource_tracker 获取本地 PLR 实例
plr = self.resource_tracker.figure_resource({"name": res_name}, try_mode=False)
# 获取父资源
res = self.resource_tracker.parent_resource(plr)
if id(res) not in seen:
seen.add(id(res))
unique_resources.append(res)
# 使用新的资源树接口更新
if unique_resources:
await self.update_resource(unique_resources)
except Exception as e:
self.lab_logger().error(f"资源更新失败: {e}")
self.lab_logger().error(traceback.format_exc())
if v in ["unilabos_msgs/Resource", "sequence<unilabos_msgs/Resource>"]:
r = ResourceUpdate.Request()
r.resources = [
convert_to_ros_msg(Resource, rs) for rs in nested_dict_to_list(protocol_kwargs[k])
]
response = await self._resource_clients["resource_update"].call_async(r)
# 设置成功状态和返回值
execution_success = True

182  unilabos/ros/x/rclpyx.py  Normal file
View File

@@ -0,0 +1,182 @@
import asyncio
from asyncio import events
import threading
import rclpy
from rclpy.impl.implementation_singleton import rclpy_implementation as _rclpy
from rclpy.executors import await_or_execute, Executor
from rclpy.action import ActionClient, ActionServer
from rclpy.action.server import ServerGoalHandle, GoalResponse, GoalInfo, GoalStatus
from std_msgs.msg import String
from action_tutorials_interfaces.action import Fibonacci
loop = None
def get_event_loop():
global loop
return loop
async def default_handle_accepted_callback_async(goal_handle):
"""Execute the goal."""
await goal_handle.execute()
class ServerGoalHandleX(ServerGoalHandle):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
async def execute(self, execute_callback=None):
# It's possible that there has been a request to cancel the goal prior to executing.
# In this case we want to avoid the illegal state transition to EXECUTING
# but still call the users execute callback to let them handle canceling the goal.
if not self.is_cancel_requested:
self._update_state(_rclpy.GoalEvent.EXECUTE)
await self._action_server.notify_execute_async(self, execute_callback)
class ActionServerX(ActionServer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.register_handle_accepted_callback(default_handle_accepted_callback_async)
async def _execute_goal_request(self, request_header_and_message):
request_header, goal_request = request_header_and_message
goal_uuid = goal_request.goal_id
goal_info = GoalInfo()
goal_info.goal_id = goal_uuid
self._node.get_logger().debug('New goal request with ID: {0}'.format(goal_uuid.uuid))
# Check if goal ID is already being tracked by this action server
with self._lock:
goal_id_exists = self._handle.goal_exists(goal_info)
accepted = False
if not goal_id_exists:
# Call user goal callback
response = await await_or_execute(self._goal_callback, goal_request.goal)
if not isinstance(response, GoalResponse):
self._node.get_logger().warning(
'Goal request callback did not return a GoalResponse type. Rejecting goal.')
else:
accepted = GoalResponse.ACCEPT == response
if accepted:
# Stamp time of acceptance
goal_info.stamp = self._node.get_clock().now().to_msg()
# Create a goal handle
try:
with self._lock:
goal_handle = ServerGoalHandleX(self, goal_info, goal_request.goal)
except RuntimeError as e:
self._node.get_logger().error(
'Failed to accept new goal with ID {0}: {1}'.format(goal_uuid.uuid, e))
accepted = False
else:
self._goal_handles[bytes(goal_uuid.uuid)] = goal_handle
# Send response
response_msg = self._action_type.Impl.SendGoalService.Response()
response_msg.accepted = accepted
response_msg.stamp = goal_info.stamp
self._handle.send_goal_response(request_header, response_msg)
if not accepted:
self._node.get_logger().debug('New goal rejected: {0}'.format(goal_uuid.uuid))
return
self._node.get_logger().debug('New goal accepted: {0}'.format(goal_uuid.uuid))
# Provide the user a reference to the goal handle
# await await_or_execute(self._handle_accepted_callback, goal_handle)
asyncio.create_task(self._handle_accepted_callback(goal_handle))
async def notify_execute_async(self, goal_handle, execute_callback):
# Use provided callback, defaulting to a previously registered callback
if execute_callback is None:
if self._execute_callback is None:
return
execute_callback = self._execute_callback
# Schedule user callback for execution
self._node.get_logger().info(f"{events.get_running_loop()}")
asyncio.create_task(self._execute_goal(execute_callback, goal_handle))
# loop = asyncio.new_event_loop()
# asyncio.set_event_loop(loop)
# task = loop.create_task(self._execute_goal(execute_callback, goal_handle))
# await task
class ActionClientX(ActionClient):
feedback_queue = asyncio.Queue()
async def feedback_cb(self, msg):
await self.feedback_queue.put(msg)
async def send_goal_async(self, goal_msg):
goal_future = super().send_goal_async(
goal_msg,
feedback_callback=self.feedback_cb
)
client_goal_handle = await asyncio.ensure_future(goal_future)
if not client_goal_handle.accepted:
raise Exception("Goal rejected.")
result_future = client_goal_handle.get_result_async()
while True:
feedback_future = asyncio.ensure_future(self.feedback_queue.get())
tasks = [result_future, feedback_future]
await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
if result_future.done():
result = result_future.result().result
yield (None, result)
break
else:
feedback = feedback_future.result().feedback
yield (feedback, None)
async def main(node):
print('Node started.')
action_client = ActionClientX(node, Fibonacci, 'fibonacci')
goal_msg = Fibonacci.Goal()
goal_msg.order = 10
async for (feedback, result) in action_client.send_goal_async(goal_msg):
if feedback:
print(f'Feedback: {feedback}')
else:
print(f'Result: {result}')
print('Finished.')
async def ros_loop_node(node):
while rclpy.ok():
rclpy.spin_once(node, timeout_sec=0)
await asyncio.sleep(1e-4)
async def ros_loop(executor: Executor):
while rclpy.ok():
executor.spin_once(timeout_sec=0)
await asyncio.sleep(1e-4)
def run_event_loop():
global loop
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_forever()
def run_event_loop_in_thread():
thread = threading.Thread(target=run_event_loop, args=())
thread.start()
if __name__ == "__main__":
rclpy.init()
node = rclpy.create_node('async_subscriber')
# Spin the node and run the Fibonacci client demo concurrently.
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.gather(ros_loop_node(node), main(node)))

View File

@@ -1,28 +0,0 @@
{
"nodes": [
{
"id": "workbench_1",
"name": "虚拟工作台",
"children": [],
"parent": null,
"type": "device",
"class": "virtual_workbench",
"position": {
"x": 400,
"y": 300,
"z": 0
},
"config": {
"arm_operation_time": 3.0,
"heating_time": 10.0,
"num_heating_stations": 3
},
"data": {
"status": "Ready",
"arm_state": "idle",
"message": "工作台就绪"
}
}
],
"links": []
}

View File

@@ -1,29 +0,0 @@
{
"nodes": [
{
"id": "Liquid_Sensor_1",
"name": "XKC Sensor",
"children": [],
"parent": null,
"type": "device",
"class": "sensor.xkc_rs485",
"position": {
"x": 0,
"y": 0,
"z": 0
},
"config": {
"port": "/dev/tty.usbserial-3110",
"baudrate": 9600,
"device_id": 1,
"threshold": 300,
"timeout": 3.0
},
"data": {
"level": false,
"rssi": 0
}
}
],
"links": []
}

View File

@@ -1,28 +0,0 @@
{
"nodes": [
{
"id": "ZDT_Motor",
"name": "ZDT Motor",
"children": [],
"parent": null,
"type": "device",
"class": "motor.zdt_x42",
"position": {
"x": 0,
"y": 0,
"z": 0
},
"config": {
"port": "/dev/tty.usbserial-3110",
"baudrate": 115200,
"device_id": 1,
"debug": true
},
"data": {
"position": 0,
"status": "idle"
}
}
],
"links": []
}

View File

@@ -0,0 +1,187 @@
# UniLabOS 日志配置说明
> **文件位置**: `unilabos/utils/log.py`
> **最后更新**: 2026-01-11
> **维护者**: Uni-Lab-OS 开发团队
本文档说明 UniLabOS 日志系统中对第三方库和内部模块的日志级别配置,避免控制台被过多的 DEBUG 日志淹没。
---
## 📋 已屏蔽的日志
以下库/模块的日志已被设置为 **WARNING****INFO** 级别,不再显示 DEBUG 日志:
### 1. pymodbusModbus 通信库)
**配置位置**: `log.py` 第196-200行
```python
# pymodbus 库的日志太详细,设置为 WARNING
logging.getLogger('pymodbus').setLevel(logging.WARNING)
logging.getLogger('pymodbus.logging').setLevel(logging.WARNING)
logging.getLogger('pymodbus.logging.base').setLevel(logging.WARNING)
logging.getLogger('pymodbus.logging.decoders').setLevel(logging.WARNING)
```
**屏蔽原因**:
- pymodbus 在 DEBUG 级别会输出每一次 Modbus 通信的详细信息
- 包括 `Processing: 0x5 0x1e 0x0 0x0...` 等原始数据
- 包括 `decoded PDU function_code(3 sub -1) -> ReadHoldingRegistersResponse(...)` 等解码信息
- 这些信息对日常使用价值不大,但会快速刷屏
**典型被屏蔽的日志**:
```
[DEBUG] Processing: 0x5 0x1e 0x0 0x0 0x0 0x7 0x1 0x3 0x4 0x0 0x0 0x0 0x0 [handleFrame:72] [pymodbus.logging.base]
[DEBUG] decoded PDU function_code(3 sub -1) -> ReadHoldingRegistersResponse(...) [decode:79] [pymodbus.logging.decoders]
```
---
### 2. websockets (WebSocket library)
**Configured in**: `log.py`, lines 202-205
```python
# websockets logs are fairly chatty; set to WARNING
logging.getLogger('websockets').setLevel(logging.WARNING)
logging.getLogger('websockets.client').setLevel(logging.WARNING)
logging.getLogger('websockets.server').setLevel(logging.WARNING)
```
**Why it is suppressed**:
- At DEBUG level, connection, disconnection, and heartbeat events are logged very frequently
- For long-running services these messages are of little value
---
### 3. ROS Host Node (device status updates)
**Configured in**: `log.py`, lines 207-208
```python
# ROS node status updates are too frequent; set to INFO
logging.getLogger('unilabos.ros.nodes.presets.host_node').setLevel(logging.INFO)
```
**Why it is suppressed**:
- Device status updates (e.g. glovebox pressure) arrive every few seconds
- DEBUG logging records every single status change, flooding the log
- These frequent updates are of little use for debugging
**Typical suppressed line**:
```
[DEBUG] [/devices/host_node] Status updated: BatteryStation.data_glove_box_pressure = 4.229457855224609 [property_callback:666] [unilabos.ros.nodes.presets.host_node]
```
---
### 4. asyncio and urllib3
**Configured in**: `log.py`, lines 224-225
```python
logging.getLogger("asyncio").setLevel(logging.INFO)
logging.getLogger("urllib3").setLevel(logging.INFO)
```
**Why they are suppressed**:
- asyncio: internal debug output of the async IO machinery
- urllib3: connection-pool, retry, and other details of the HTTP client
---
## 🔧 How to Temporarily Re-enable These Logs (for debugging)
### Method 1: Edit log.py (permanent)
In the `configure_logger()` function in `log.py`, change the level of the relevant library to `logging.DEBUG`:
```python
# Temporarily enable pymodbus DEBUG logs
logging.getLogger('pymodbus').setLevel(logging.DEBUG)
logging.getLogger('pymodbus.logging').setLevel(logging.DEBUG)
logging.getLogger('pymodbus.logging.base').setLevel(logging.DEBUG)
logging.getLogger('pymodbus.logging.decoders').setLevel(logging.DEBUG)
```
### Method 2: Enable temporarily in code (one-off debugging)
Add the following to the file you are debugging:
```python
import logging
# Temporarily enable pymodbus DEBUG logs
logging.getLogger('pymodbus').setLevel(logging.DEBUG)
# Your Modbus debugging code
...
# Restore once debugging is done
logging.getLogger('pymodbus').setLevel(logging.WARNING)
```
### Method 3: Environment variable or config file (recommended)
In the future, a startup option such as `--debug-modbus` could be added to control this dynamically.
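A minimal sketch of what such a switch could look like, assuming a hypothetical environment variable named `UNILAB_DEBUG_MODBUS` (neither the variable nor the helper exists yet; this is only an illustration):
```python
import logging
import os

def apply_optional_debug_overrides() -> None:
    """Re-enable suppressed loggers when the matching env var is set."""
    # Hypothetical variable; any non-empty value turns pymodbus DEBUG back on.
    if os.environ.get("UNILAB_DEBUG_MODBUS"):
        for name in ("pymodbus", "pymodbus.logging",
                     "pymodbus.logging.base", "pymodbus.logging.decoders"):
            logging.getLogger(name).setLevel(logging.DEBUG)
```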
---
## 📊 Log Level Reference
| Level | Value | Purpose | Shown |
|------|------|------|---------|
| TRACE | 5 | Most detailed trace output | ✅ |
| DEBUG | 10 | Debug information | ✅ |
| INFO | 20 | General information | ✅ |
| WARNING | 30 | Warnings | ✅ |
| ERROR | 40 | Errors | ✅ |
| CRITICAL | 50 | Critical errors | ✅ |
**Current configuration**:
- UniLabOS's own code: DEBUG and above are shown
- pymodbus/websockets: **WARNING** and above are shown (DEBUG/INFO suppressed)
- ROS host_node: **INFO** and above are shown (DEBUG suppressed)
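The TRACE level (value 5) in the table above is not one of the standard `logging` levels; a sketch of how such a custom level is typically registered (the actual definition lives in `log.py` and may differ):
```python
import logging

TRACE = 5
logging.addLevelName(TRACE, "TRACE")

def _trace(self, message, *args, **kwargs):
    # Emit only when the logger is enabled for TRACE.
    if self.isEnabledFor(TRACE):
        self._log(TRACE, message, args, **kwargs)

logging.Logger.trace = _trace  # illustrative monkey-patch
```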
---
## ⚠️ Important Notes
### When changes take effect
- After editing `log.py`, you must **restart the unilab service** for the change to take effect
- No reinstallation or recompilation is required
### Debugging Modbus communication issues
If you need to debug a Modbus communication failure (a reusable helper is sketched below):
1. Temporarily enable pymodbus DEBUG logs (method 2)
2. Reproduce the problem
3. Inspect the detailed communication log
4. Remember to restore the WARNING level when you are done
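A small reusable helper for the temporary switch described above (not part of `log.py`; shown only as an illustration):
```python
import logging
from contextlib import contextmanager

@contextmanager
def temporary_level(logger_name: str, level: int = logging.DEBUG):
    """Temporarily change a logger's level and restore it afterwards."""
    logger = logging.getLogger(logger_name)
    previous = logger.level
    logger.setLevel(level)
    try:
        yield logger
    finally:
        logger.setLevel(previous)

# Usage while reproducing a Modbus fault:
# with temporary_level("pymodbus"):
#     run_the_failing_modbus_call()
```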
### Debugging device status issues
If you need to debug device status updates:
```python
logging.getLogger('unilabos.ros.nodes.presets.host_node').setLevel(logging.DEBUG)
```
---
## 📝 Maintenance Log
| Date | Change | Author |
|------|---------|--------|
| 2026-01-11 | Initial version; suppressed pymodbus, websockets, and ROS host_node | - |
| 2026-01-07 | Suppressed pymodbus and websockets (log-0107.py) | - |
---
## 🔗 Related Files
- `log.py` - main logging configuration file
- `unilabos/devices/workstation/coin_cell_assembly/` - coin-cell assembly workstation code that uses Modbus
- `unilabos/ros/nodes/presets/host_node.py` - ROS host node code
---
**Maintenance note**: If a new third-party library is added or another log-flooding issue is found, please record it in this document and update the `log.py` configuration accordingly.

View File

@@ -182,49 +182,3 @@ def get_all_subscriptions(instance) -> list:
except Exception:
pass
return subscriptions
def not_action(func: F) -> F:
"""
Decorator that marks a method as not being an action.
Used on methods of a driver class so that complete_registry does not register them as actions.
Intended for helper methods, internal utilities, and other public methods that should not be exposed as device actions.
Example:
class MyDriver:
@not_action
def helper_method(self):
# this method will NOT be registered as an action
pass
def actual_action(self, param: str):
# this method WILL be registered as an action
self.helper_method()
Note:
- Can be combined with other decorators; @not_action should be the outermost one
- Only affects action detection in complete_registry; normal method calls are unaffected
"""
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
# attach the marker to the function
wrapper._is_not_action = True # type: ignore[attr-defined]
return wrapper # type: ignore[return-value]
def is_not_action(func) -> bool:
"""
Check whether a function has been marked as not being an action
Args:
func: the function to check
Returns:
True if the function was decorated with @not_action, otherwise False
"""
return getattr(func, "_is_not_action", False)
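# --- Illustrative sketch (not part of this file): how a registry scan could use
# --- these helpers. The class and function names below are assumptions.
class DemoDriver:
    @not_action
    def load_calibration(self):
        """Public helper that should not become a device action."""
        return {"offset": 0.1}

    def move_to(self, position: float):
        """Public method that should be exposed as an action."""
        return position + self.load_calibration()["offset"]


def collect_action_methods(cls) -> list:
    """Keep public callables that are not marked with @not_action."""
    names = []
    for name in dir(cls):
        if name.startswith("_"):
            continue
        attr = getattr(cls, name)
        if callable(attr) and not is_not_action(attr):
            names.append(name)
    return names

# collect_action_methods(DemoDriver) -> ["move_to"]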

View File

@@ -24,7 +24,6 @@ class EnvironmentChecker:
"msgcenterpy": "msgcenterpy",
"opentrons_shared_data": "opentrons_shared_data",
"typing_extensions": "typing_extensions",
"crcmod": "crcmod-plus",
}
# Special packages (packages that need special handling)

View File

@@ -28,7 +28,6 @@ __all__ = [
from ast import Constant
from unilabos.utils import logger
from unilabos.utils.decorator import is_not_action
class ImportManager:
@@ -276,9 +275,6 @@ class ImportManager:
method_info = self._analyze_method_signature(method)
result["status_methods"][actual_name] = method_info
elif not name.startswith("_"):
# check whether the method is marked with the @not_action decorator
if is_not_action(method):
continue
# all other methods not starting with _ are classified as actions
method_info = self._analyze_method_signature(method)
result["action_methods"][name] = method_info
@@ -334,9 +330,6 @@ class ImportManager:
if actual_name not in result["status_methods"]:
result["status_methods"][actual_name] = method_info
else:
# check whether the method is marked with the @not_action decorator
if self._is_not_action_method(node):
continue
# all other methods not starting with _ are classified as actions
result["action_methods"][method_name] = method_info
return result
@@ -457,13 +450,6 @@ class ImportManager:
return True
return False
def _is_not_action_method(self, node: ast.FunctionDef) -> bool:
"""检查是否是@not_action装饰的方法"""
for decorator in node.decorator_list:
if isinstance(decorator, ast.Name) and decorator.id == "not_action":
return True
return False
def _get_property_name_from_setter(self, node: ast.FunctionDef) -> str:
"""从setter装饰器中获取属性名"""
for decorator in node.decorator_list:

View File

@@ -1,11 +1,7 @@
import psutil
import pywinauto
try:
from pywinauto_recorder import UIApplication
from pywinauto_recorder.player import UIPath, click, focus_on_application, exists, find, get_wrapper_path
except ImportError:
print("未安装pywinauto_recorder部分功能无法使用安装时注意enum")
pass
from pywinauto_recorder import UIApplication
from pywinauto_recorder.player import UIPath, click, focus_on_application, exists, find, get_wrapper_path
from pywinauto.controls.uiawrapper import UIAWrapper
from pywinauto.application import WindowSpecification
from pywinauto import findbestmatch

View File

@@ -1,18 +0,0 @@
networkx
typing_extensions
websockets
msgcenterpy>=0.1.5
opentrons_shared_data
pint
fastapi
jinja2
requests
uvicorn
pyautogui
opcua
pyserial
pandas
crcmod-plus
pymodbus
matplotlib
pylibftdi

View File

@@ -1,89 +1,3 @@
"""
工作流转换模块 - JSON 到 WorkflowGraph 的转换流程
==================== 输入格式 (JSON) ====================
{
"workflow": [
{"action": "transfer_liquid", "action_args": {"sources": "cell_lines", "targets": "Liquid_1", "asp_vol": 100.0, "dis_vol": 74.75, ...}},
...
],
"reagent": {
"cell_lines": {"slot": 4, "well": ["A1", "A3", "A5"], "labware": "DRUG + YOYO-MEDIA"},
"Liquid_1": {"slot": 1, "well": ["A4", "A7", "A10"], "labware": "rep 1"},
...
}
}
==================== 转换步骤 ====================
第一步: 按 slot 去重创建 create_resource 节点(创建板子)
--------------------------------------------------------------------------------
- 遍历所有 reagent按 slot 去重,为每个唯一的 slot 创建一个板子
- 生成参数:
res_id: plate_slot_{slot}
device_id: /PRCXI
class_name: PRCXI_BioER_96_wellplate
parent: /PRCXI/PRCXI_Deck/T{slot}
slot_on_deck: "{slot}"
- 输出端口: labware用于连接 set_liquid_from_plate
- 控制流: create_resource 之间通过 ready 端口串联
示例: slot=1, slot=4 -> 创建 2 个 create_resource 节点
第二步: 为每个 reagent 创建 set_liquid_from_plate 节点(设置液体)
--------------------------------------------------------------------------------
- 遍历所有 reagent为每个试剂创建 set_liquid_from_plate 节点
- 生成参数:
plate: [](通过连接传递,来自 create_resource 的 labware
well_names: ["A1", "A3", "A5"](来自 reagent 的 well 数组)
liquid_names: ["cell_lines", "cell_lines", "cell_lines"](与 well 数量一致)
volumes: [1e5, 1e5, 1e5](与 well 数量一致,默认体积)
- 输入连接: create_resource (labware) -> set_liquid_from_plate (input_plate)
- 输出端口: output_wells用于连接 transfer_liquid
- 控制流: set_liquid_from_plate 连接在所有 create_resource 之后,通过 ready 端口串联
第三步: 解析 workflow创建 transfer_liquid 等动作节点
--------------------------------------------------------------------------------
- 遍历 workflow 数组,为每个动作创建步骤节点
- 参数重命名: asp_vol -> asp_vols, dis_vol -> dis_vols, asp_flow_rate -> asp_flow_rates, dis_flow_rate -> dis_flow_rates
- 参数扩展: 根据 targets 的 wells 数量,将单值扩展为数组
例: asp_vol=100.0, targets 有 3 个 wells -> asp_vols=[100.0, 100.0, 100.0]
- 连接处理: 如果 sources/targets 已通过 set_liquid_from_plate 连接,参数值改为 []
- 输入连接: set_liquid_from_plate (output_wells) -> transfer_liquid (sources_identifier / targets_identifier)
- 输出端口: sources_out, targets_out用于连接下一个 transfer_liquid
==================== 连接关系图 ====================
控制流 (ready 端口串联):
create_resource_1 -> create_resource_2 -> ... -> set_liquid_1 -> set_liquid_2 -> ... -> transfer_liquid_1 -> transfer_liquid_2 -> ...
物料流:
[create_resource] --labware--> [set_liquid_from_plate] --output_wells--> [transfer_liquid] --sources_out/targets_out--> [下一个 transfer_liquid]
(slot=1) (cell_lines) (input_plate) (sources_identifier) (sources_identifier)
(slot=4) (Liquid_1) (targets_identifier) (targets_identifier)
==================== 端口映射 ====================
create_resource:
输出: labware
set_liquid_from_plate:
输入: input_plate
输出: output_plate, output_wells
transfer_liquid:
输入: sources -> sources_identifier, targets -> targets_identifier
输出: sources -> sources_out, targets -> targets_out
==================== 校验规则 ====================
- 检查 sources/targets 是否在 reagent 中定义
- 检查 sources 和 targets 的 wells 数量是否匹配
- 检查参数数组长度是否与 wells 数量一致
- 如有问题,在 footer 中添加 [WARN: ...] 标记
"""
import re
import uuid
@@ -94,28 +8,6 @@ from typing import Dict, List, Any, Tuple, Optional
Json = Dict[str, Any]
# ==================== 默认配置 ====================
# create_resource 节点默认参数
CREATE_RESOURCE_DEFAULTS = {
"device_id": "/PRCXI",
"parent_template": "/PRCXI/PRCXI_Deck/T{slot}", # {slot} 会被替换为实际的 slot 值
"class_name": "PRCXI_BioER_96_wellplate",
}
# 默认液体体积 (uL)
DEFAULT_LIQUID_VOLUME = 1e5
# 参数重命名映射:单数 -> 复数(用于 transfer_liquid 等动作)
PARAM_RENAME_MAPPING = {
"asp_vol": "asp_vols",
"dis_vol": "dis_vols",
"asp_flow_rate": "asp_flow_rates",
"dis_flow_rate": "dis_flow_rates",
}
# ---------------- Graph ----------------
@@ -336,7 +228,7 @@ def refactor_data(
def build_protocol_graph(
labware_info: Dict[str, Dict[str, Any]],
labware_info: List[Dict[str, Any]],
protocol_steps: List[Dict[str, Any]],
workstation_name: str,
action_resource_mapping: Optional[Dict[str, str]] = None,
@@ -344,227 +236,112 @@ def build_protocol_graph(
"""统一的协议图构建函数,根据设备类型自动选择构建逻辑
Args:
labware_info: labware 信息字典,格式为 {name: {slot, well, labware, ...}, ...}
labware_info: labware 信息字典
protocol_steps: 协议步骤列表
workstation_name: 工作站名称
action_resource_mapping: action 到 resource_name 的映射字典,可选
"""
G = WorkflowGraph()
resource_last_writer = {} # reagent_name -> "node_id:port"
slot_to_create_resource = {} # slot -> create_resource node_id
resource_last_writer = {}
protocol_steps = refactor_data(protocol_steps, action_resource_mapping)
# 有机化学&移液站协议图构建
WORKSTATION_ID = workstation_name
# ==================== 第一步:按 slot 去重创建 create_resource 节点 ====================
# 收集所有唯一的 slot
slots_info = {} # slot -> {labware, res_id}
for labware_id, item in labware_info.items():
slot = str(item.get("slot", ""))
if slot and slot not in slots_info:
res_id = f"plate_slot_{slot}"
slots_info[slot] = {
"labware": item.get("labware", ""),
"res_id": res_id,
}
# 为每个唯一的 slot 创建 create_resource 节点
# 为所有labware创建资源节点
res_index = 0
last_create_resource_id = None
for slot, info in slots_info.items():
for labware_id, item in labware_info.items():
# item_id = item.get("id") or item.get("name", f"item_{uuid.uuid4()}")
node_id = str(uuid.uuid4())
res_id = info["res_id"]
# 判断节点类型
if "Rack" in str(labware_id) or "Tip" in str(labware_id):
lab_node_type = "Labware"
description = f"Prepare Labware: {labware_id}"
liquid_type = []
liquid_volume = []
elif item.get("type") == "hardware" or "reactor" in str(labware_id).lower():
if "reactor" not in str(labware_id).lower():
continue
lab_node_type = "Sample"
description = f"Prepare Reactor: {labware_id}"
liquid_type = []
liquid_volume = []
else:
lab_node_type = "Reagent"
description = f"Add Reagent to Flask: {labware_id}"
liquid_type = [labware_id]
liquid_volume = [1e5]
res_index += 1
G.add_node(
node_id,
template_name="create_resource",
resource_name="host_node",
name=f"Plate {res_index}",
description=f"Create plate on slot {slot}",
lab_node_type="Labware",
name=f"Res {res_index}",
description=description,
lab_node_type=lab_node_type,
footer="create_resource-host_node",
param={
"res_id": res_id,
"device_id": CREATE_RESOURCE_DEFAULTS["device_id"],
"class_name": CREATE_RESOURCE_DEFAULTS["class_name"],
"parent": CREATE_RESOURCE_DEFAULTS["parent_template"].format(slot=slot),
"res_id": labware_id,
"device_id": WORKSTATION_ID,
"class_name": "container",
"parent": WORKSTATION_ID,
"bind_locations": {"x": 0.0, "y": 0.0, "z": 0.0},
"slot_on_deck": slot,
"liquid_input_slot": [-1],
"liquid_type": liquid_type,
"liquid_volume": liquid_volume,
"slot_on_deck": "",
},
)
slot_to_create_resource[slot] = node_id
resource_last_writer[labware_id] = f"{node_id}:labware"
# create_resource 之间通过 ready 串联
if last_create_resource_id is not None:
G.add_edge(last_create_resource_id, node_id, source_port="ready", target_port="ready")
last_create_resource_id = node_id
# ==================== 第二步:为每个 reagent 创建 set_liquid_from_plate 节点 ====================
set_liquid_index = 0
last_set_liquid_id = last_create_resource_id # set_liquid_from_plate 连接在 create_resource 之后
for labware_id, item in labware_info.items():
# 跳过 Tip/Rack 类型
if "Rack" in str(labware_id) or "Tip" in str(labware_id):
continue
if item.get("type") == "hardware":
continue
slot = str(item.get("slot", ""))
wells = item.get("well", [])
if not wells or not slot:
continue
# res_id 不能有空格
res_id = str(labware_id).replace(" ", "_")
well_count = len(wells)
node_id = str(uuid.uuid4())
set_liquid_index += 1
G.add_node(
node_id,
template_name="set_liquid_from_plate",
resource_name="liquid_handler.prcxi",
name=f"SetLiquid {set_liquid_index}",
description=f"Set liquid: {labware_id}",
lab_node_type="Reagent",
footer="set_liquid_from_plate-liquid_handler.prcxi",
param={
"plate": [], # 通过连接传递
"well_names": wells, # 孔位名数组,如 ["A1", "A3", "A5"]
"liquid_names": [res_id] * well_count,
"volumes": [DEFAULT_LIQUID_VOLUME] * well_count,
},
)
# ready 连接:上一个节点 -> set_liquid_from_plate
if last_set_liquid_id is not None:
G.add_edge(last_set_liquid_id, node_id, source_port="ready", target_port="ready")
last_set_liquid_id = node_id
# 物料流create_resource 的 labware -> set_liquid_from_plate 的 input_plate
create_res_node_id = slot_to_create_resource.get(slot)
if create_res_node_id:
G.add_edge(create_res_node_id, node_id, source_port="labware", target_port="input_plate")
# set_liquid_from_plate 的输出 output_wells 用于连接 transfer_liquid
resource_last_writer[labware_id] = f"{node_id}:output_wells"
last_control_node_id = last_set_liquid_id
# 端口名称映射JSON 字段名 -> 实际 handle key
INPUT_PORT_MAPPING = {
"sources": "sources_identifier",
"targets": "targets_identifier",
"vessel": "vessel",
"to_vessel": "to_vessel",
"from_vessel": "from_vessel",
"reagent": "reagent",
"solvent": "solvent",
"compound": "compound",
}
OUTPUT_PORT_MAPPING = {
"sources": "sources_out", # 输出端口是 xxx_out
"targets": "targets_out", # 输出端口是 xxx_out
"vessel": "vessel_out",
"to_vessel": "to_vessel_out",
"from_vessel": "from_vessel_out",
"filtrate_vessel": "filtrate_out",
"reagent": "reagent",
"solvent": "solvent",
"compound": "compound",
}
# 需要根据 wells 数量扩展的参数列表(复数形式)
EXPAND_BY_WELLS_PARAMS = ["asp_vols", "dis_vols", "asp_flow_rates", "dis_flow_rates"]
last_control_node_id = None
# 处理协议步骤
for step in protocol_steps:
node_id = str(uuid.uuid4())
params = step.get("param", {}).copy() # 复制一份,避免修改原数据
connected_params = set() # 记录被连接的参数
warnings = [] # 收集警告信息
# 参数重命名:单数 -> 复数
for old_name, new_name in PARAM_RENAME_MAPPING.items():
if old_name in params:
params[new_name] = params.pop(old_name)
# 处理输入连接
for param_key, target_port in INPUT_PORT_MAPPING.items():
resource_name = params.get(param_key)
if resource_name and resource_name in resource_last_writer:
source_node, source_port = resource_last_writer[resource_name].split(":")
G.add_edge(source_node, node_id, source_port=source_port, target_port=target_port)
connected_params.add(param_key)
elif resource_name and resource_name not in resource_last_writer:
# 资源名在 labware_info 中不存在
warnings.append(f"{param_key}={resource_name} 未找到")
# 获取 targets 对应的 wells 数量,用于扩展参数
targets_name = params.get("targets")
sources_name = params.get("sources")
targets_wells_count = 1
sources_wells_count = 1
if targets_name and targets_name in labware_info:
target_wells = labware_info[targets_name].get("well", [])
targets_wells_count = len(target_wells) if target_wells else 1
elif targets_name:
warnings.append(f"targets={targets_name} 未在 reagent 中定义")
if sources_name and sources_name in labware_info:
source_wells = labware_info[sources_name].get("well", [])
sources_wells_count = len(source_wells) if source_wells else 1
elif sources_name:
warnings.append(f"sources={sources_name} 未在 reagent 中定义")
# 检查 sources 和 targets 的 wells 数量是否匹配
if targets_wells_count != sources_wells_count and targets_name and sources_name:
warnings.append(f"wells 数量不匹配: sources={sources_wells_count}, targets={targets_wells_count}")
# 使用 targets 的 wells 数量来扩展参数
wells_count = targets_wells_count
# 扩展单值参数为数组(根据 targets 的 wells 数量)
for expand_param in EXPAND_BY_WELLS_PARAMS:
if expand_param in params:
value = params[expand_param]
# 如果是单个值,扩展为数组
if not isinstance(value, list):
params[expand_param] = [value] * wells_count
# 如果已经是数组但长度不对,记录警告
elif len(value) != wells_count:
warnings.append(f"{expand_param} 数量({len(value)})与 wells({wells_count})不匹配")
# 如果 sources/targets 已通过连接传递,将参数值改为空数组
for param_key in connected_params:
if param_key in params:
params[param_key] = []
# 更新 step 的 param 和 footer
step_copy = step.copy()
step_copy["param"] = params
# 如果有警告,修改 footer 添加警告标记(警告放前面)
if warnings:
original_footer = step.get("footer", "")
step_copy["footer"] = f"[WARN: {'; '.join(warnings)}] {original_footer}"
G.add_node(node_id, **step_copy)
G.add_node(node_id, **step)
# 控制流
if last_control_node_id is not None:
G.add_edge(last_control_node_id, node_id, source_port="ready", target_port="ready")
last_control_node_id = node_id
# 处理输出:更新 resource_last_writer
for param_key, output_port in OUTPUT_PORT_MAPPING.items():
resource_name = step.get("param", {}).get(param_key) # 使用原始参数值
# 物料流
params = step.get("param", {})
input_resources_possible_names = [
"vessel",
"to_vessel",
"from_vessel",
"reagent",
"solvent",
"compound",
"sources",
"targets",
]
for target_port in input_resources_possible_names:
resource_name = params.get(target_port)
if resource_name and resource_name in resource_last_writer:
source_node, source_port = resource_last_writer[resource_name].split(":")
G.add_edge(source_node, node_id, source_port=source_port, target_port=target_port)
output_resources = {
"vessel_out": params.get("vessel"),
"from_vessel_out": params.get("from_vessel"),
"to_vessel_out": params.get("to_vessel"),
"filtrate_out": params.get("filtrate_vessel"),
"reagent": params.get("reagent"),
"solvent": params.get("solvent"),
"compound": params.get("compound"),
"sources_out": params.get("sources"),
"targets_out": params.get("targets"),
}
for source_port, resource_name in output_resources.items():
if resource_name:
resource_last_writer[resource_name] = f"{node_id}:{output_port}"
resource_last_writer[resource_name] = f"{node_id}:{source_port}"
return G

View File

@@ -1,68 +1,21 @@
"""
JSON 工作流转换模块
将 workflow/reagent 格式的 JSON 转换为统一工作流格式。
输入格式:
{
"workflow": [
{"action": "...", "action_args": {...}},
...
],
"reagent": {
"reagent_name": {"slot": int, "well": [...], "labware": "..."},
...
}
}
提供从多种 JSON 格式转换为统一工作流格式的功能
支持的格式:
1. workflow/reagent 格式
2. steps_info/labware_info 格式
"""
import json
from os import PathLike
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
from typing import Any, Dict, List, Optional, Set, Tuple, Union
from unilabos.workflow.common import WorkflowGraph, build_protocol_graph
from unilabos.registry.registry import lab_registry
# ==================== 字段映射配置 ====================
# action 到 resource_name 的映射
ACTION_RESOURCE_MAPPING: Dict[str, str] = {
# 生物实验操作
"transfer_liquid": "liquid_handler.prcxi",
"transfer": "liquid_handler.prcxi",
"incubation": "incubator.prcxi",
"move_labware": "labware_mover.prcxi",
"oscillation": "shaker.prcxi",
# 有机化学操作
"HeatChillToTemp": "heatchill.chemputer",
"StopHeatChill": "heatchill.chemputer",
"StartHeatChill": "heatchill.chemputer",
"HeatChill": "heatchill.chemputer",
"Dissolve": "stirrer.chemputer",
"Transfer": "liquid_handler.chemputer",
"Evaporate": "rotavap.chemputer",
"Recrystallize": "reactor.chemputer",
"Filter": "filter.chemputer",
"Dry": "dryer.chemputer",
"Add": "liquid_handler.chemputer",
}
# action_args 字段到 parameters 字段的映射
# 格式: {"old_key": "new_key"}, 仅映射需要重命名的字段
ARGS_FIELD_MAPPING: Dict[str, str] = {
# 如果需要字段重命名,在这里配置
# "old_field_name": "new_field_name",
}
# 默认工作站名称
DEFAULT_WORKSTATION = "PRCXI"
# ==================== 核心转换函数 ====================
def get_action_handles(resource_name: str, template_name: str) -> Dict[str, List[str]]:
"""
从 registry 获取指定设备和动作的 handles 配置
@@ -86,10 +39,12 @@ def get_action_handles(resource_name: str, template_name: str) -> Dict[str, List
handles = action_config.get("handles", {})
if isinstance(handles, dict):
# 处理 input handles (作为 target)
for handle in handles.get("input", []):
handler_key = handle.get("handler_key", "")
if handler_key:
result["source"].append(handler_key)
# 处理 output handles (作为 source)
for handle in handles.get("output", []):
handler_key = handle.get("handler_key", "")
if handler_key:
@@ -114,9 +69,12 @@ def validate_workflow_handles(graph: WorkflowGraph) -> Tuple[bool, List[str]]:
for edge in graph.edges:
left_uuid = edge.get("source")
right_uuid = edge.get("target")
# target_handle_key belongs to the target (right) node: its input (incoming) port
# source_handle_key belongs to the source (left) node: its output (outgoing) port
right_source_conn_key = edge.get("target_handle_key", "")
left_target_conn_key = edge.get("source_handle_key", "")
# 获取源节点和目标节点信息
left_node = nodes.get(left_uuid, {})
right_node = nodes.get(right_uuid, {})
@@ -125,93 +83,164 @@ def validate_workflow_handles(graph: WorkflowGraph) -> Tuple[bool, List[str]]:
right_res_name = right_node.get("resource_name", "")
right_template_name = right_node.get("template_name", "")
# 获取源节点的 output handles
left_node_handles = get_action_handles(left_res_name, left_template_name)
target_valid_keys = left_node_handles.get("target", [])
target_valid_keys.append("ready")
# 获取目标节点的 input handles
right_node_handles = get_action_handles(right_res_name, right_template_name)
source_valid_keys = right_node_handles.get("source", [])
source_valid_keys.append("ready")
# 验证目标节点right的输入端口
# 如果节点配置了 output handles则 source_port 必须有效
if not right_source_conn_key:
node_name = right_node.get("name", right_uuid[:8])
errors.append(f"目标节点 '{node_name}'输入端口 (target_handle_key) 为空,应设置为: {source_valid_keys}")
node_name = left_node.get("name", left_uuid[:8])
errors.append(f"节点 '{node_name}' source_handle_key 为空," f"应设置为: {source_valid_keys}")
elif right_source_conn_key not in source_valid_keys:
node_name = right_node.get("name", right_uuid[:8])
node_name = left_node.get("name", left_uuid[:8])
errors.append(
f"目标节点 '{node_name}'输入端口 '{right_source_conn_key}' 不存在,支持的输入端口: {source_valid_keys}"
f"节点 '{node_name}' source 端点 '{right_source_conn_key}' 不存在," f"支持的端点: {source_valid_keys}"
)
# 验证源节点left的输出端口
# 如果节点配置了 input handles则 target_port 必须有效
if not left_target_conn_key:
node_name = left_node.get("name", left_uuid[:8])
errors.append(f"节点 '{node_name}'输出端口 (source_handle_key) 为空,应设置为: {target_valid_keys}")
node_name = right_node.get("name", right_uuid[:8])
errors.append(f"目标节点 '{node_name}' target_handle_key 为空," f"应设置为: {target_valid_keys}")
elif left_target_conn_key not in target_valid_keys:
node_name = left_node.get("name", left_uuid[:8])
node_name = right_node.get("name", right_uuid[:8])
errors.append(
f"节点 '{node_name}'输出端口 '{left_target_conn_key}' 不存在,支持的输出端口: {target_valid_keys}"
f"目标节点 '{node_name}' target 端点 '{left_target_conn_key}' 不存在,"
f"支持的端点: {target_valid_keys}"
)
return len(errors) == 0, errors
def normalize_workflow_steps(workflow: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
# action 到 resource_name 的映射
ACTION_RESOURCE_MAPPING: Dict[str, str] = {
# 生物实验操作
"transfer_liquid": "liquid_handler.prcxi",
"transfer": "liquid_handler.prcxi",
"incubation": "incubator.prcxi",
"move_labware": "labware_mover.prcxi",
"oscillation": "shaker.prcxi",
# 有机化学操作
"HeatChillToTemp": "heatchill.chemputer",
"StopHeatChill": "heatchill.chemputer",
"StartHeatChill": "heatchill.chemputer",
"HeatChill": "heatchill.chemputer",
"Dissolve": "stirrer.chemputer",
"Transfer": "liquid_handler.chemputer",
"Evaporate": "rotavap.chemputer",
"Recrystallize": "reactor.chemputer",
"Filter": "filter.chemputer",
"Dry": "dryer.chemputer",
"Add": "liquid_handler.chemputer",
}
def normalize_steps(data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
workflow 格式的步骤数据规范化
不同格式的步骤数据规范化为统一格式
输入格式:
[{"action": "...", "action_args": {...}}, ...]
输出格式:
[{"action": "...", "parameters": {...}, "step_number": int}, ...]
支持的输入格式
- action + parameters
- action + action_args
- operation + parameters
Args:
workflow: workflow 数组
data: 原始步骤数据列表
Returns:
规范化后的步骤列表
规范化后的步骤列表,格式为 [{"action": str, "parameters": dict, "description": str?, "step_number": int?}, ...]
"""
normalized = []
for idx, step in enumerate(workflow):
action = step.get("action")
for idx, step in enumerate(data):
# 获取动作名称(支持 action 或 operation 字段)
action = step.get("action") or step.get("operation")
if not action:
continue
# 获取参数: action_args
raw_params = step.get("action_args", {})
params = {}
# 获取参数(支持 parameters 或 action_args 字段)
raw_params = step.get("parameters") or step.get("action_args") or {}
params = dict(raw_params)
# 应用字段映射
for key, value in raw_params.items():
mapped_key = ARGS_FIELD_MAPPING.get(key, key)
params[mapped_key] = value
# 规范化 source/target -> sources/targets
if "source" in raw_params and "sources" not in raw_params:
params["sources"] = raw_params["source"]
if "target" in raw_params and "targets" not in raw_params:
params["targets"] = raw_params["target"]
step_dict = {
"action": action,
"parameters": params,
"step_number": idx + 1,
}
# 获取描述(支持 description 或 purpose 字段)
description = step.get("description") or step.get("purpose")
# 保留描述字段
if "description" in step:
step_dict["description"] = step["description"]
# 获取步骤编号(优先使用原始数据中的 step_number否则使用索引+1
step_number = step.get("step_number", idx + 1)
step_dict = {"action": action, "parameters": params, "step_number": step_number}
if description:
step_dict["description"] = description
normalized.append(step_dict)
return normalized
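# --- Example (illustrative, not part of this module): normalize_steps accepts the
# --- operation/parameters variant and mirrors source/target into sources/targets.
def _demo_normalize_steps():
    steps = normalize_steps([
        {"operation": "transfer",
         "parameters": {"source": "plate_A", "target": "plate_B"},
         "purpose": "move sample"},
    ])
    assert steps[0]["action"] == "transfer"
    assert steps[0]["parameters"]["sources"] == "plate_A"
    assert steps[0]["parameters"]["targets"] == "plate_B"
    assert steps[0]["description"] == "move sample"
    return steps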
def normalize_labware(data: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
"""
将不同格式的 labware 数据规范化为统一的字典格式
支持的输入格式:
- reagent_name + material_name + positions
- name + labware + slot
Args:
data: 原始 labware 数据列表
Returns:
规范化后的 labware 字典,格式为 {name: {"slot": int, "labware": str, "well": list, "type": str, "role": str, "name": str}, ...}
"""
labware = {}
for item in data:
# 获取 key 名称(优先使用 reagent_name其次是 material_name 或 name
reagent_name = item.get("reagent_name")
key = reagent_name or item.get("material_name") or item.get("name")
if not key:
continue
key = str(key)
# 处理重复 key自动添加后缀
idx = 1
original_key = key
while key in labware:
idx += 1
key = f"{original_key}_{idx}"
labware[key] = {
"slot": item.get("positions") or item.get("slot"),
"labware": item.get("material_name") or item.get("labware"),
"well": item.get("well", []),
"type": item.get("type", "reagent"),
"role": item.get("role", ""),
"name": key,
}
return labware
def convert_from_json(
data: Union[str, PathLike, Dict[str, Any]],
workstation_name: str = DEFAULT_WORKSTATION,
workstation_name: str = "PRCXi",
validate: bool = True,
) -> WorkflowGraph:
"""
从 JSON 数据或文件转换为 WorkflowGraph
JSON 格式:
{"workflow": [...], "reagent": {...}}
支持的 JSON 格式
1. {"workflow": [...], "reagent": {...}} - 直接格式
2. {"steps_info": [...], "labware_info": [...]} - 需要规范化的格式
Args:
data: JSON 文件路径、字典数据、或 JSON 字符串
@@ -222,7 +251,7 @@ def convert_from_json(
WorkflowGraph: 构建好的工作流图
Raises:
ValueError: 不支持的 JSON 格式
ValueError: 不支持的 JSON 格式 或 句柄校验失败
FileNotFoundError: 文件不存在
json.JSONDecodeError: JSON 解析失败
"""
@@ -233,6 +262,7 @@ def convert_from_json(
with path.open("r", encoding="utf-8") as fp:
json_data = json.load(fp)
elif isinstance(data, str):
# 尝试作为 JSON 字符串解析
json_data = json.loads(data)
else:
raise FileNotFoundError(f"文件不存在: {data}")
@@ -241,24 +271,30 @@ def convert_from_json(
else:
raise TypeError(f"不支持的数据类型: {type(data)}")
# 校验格式
if "workflow" not in json_data or "reagent" not in json_data:
# 根据格式解析数据
if "workflow" in json_data and "reagent" in json_data:
# 格式1: workflow/reagent已经是规范格式
protocol_steps = json_data["workflow"]
labware_info = json_data["reagent"]
elif "steps_info" in json_data and "labware_info" in json_data:
# 格式2: steps_info/labware_info需要规范化
protocol_steps = normalize_steps(json_data["steps_info"])
labware_info = normalize_labware(json_data["labware_info"])
elif "steps" in json_data and "labware" in json_data:
# 格式3: steps/labware另一种常见格式
protocol_steps = normalize_steps(json_data["steps"])
if isinstance(json_data["labware"], list):
labware_info = normalize_labware(json_data["labware"])
else:
labware_info = json_data["labware"]
else:
raise ValueError(
"不支持的 JSON 格式。请使用标准格式:\n"
'{"workflow": [{"action": "...", "action_args": {...}}, ...], '
'"reagent": {"name": {"slot": int, "well": [...], "labware": "..."}, ...}}'
"不支持的 JSON 格式。支持的格式\n"
"1. {'workflow': [...], 'reagent': {...}}\n"
"2. {'steps_info': [...], 'labware_info': [...]}\n"
"3. {'steps': [...], 'labware': [...]}"
)
# 提取数据
workflow = json_data["workflow"]
reagent = json_data["reagent"]
# 规范化步骤数据
protocol_steps = normalize_workflow_steps(workflow)
# reagent 已经是字典格式,直接使用
labware_info = reagent
# 构建工作流图
graph = build_protocol_graph(
labware_info=labware_info,
@@ -281,7 +317,7 @@ def convert_from_json(
def convert_json_to_node_link(
data: Union[str, PathLike, Dict[str, Any]],
workstation_name: str = DEFAULT_WORKSTATION,
workstation_name: str = "PRCXi",
) -> Dict[str, Any]:
"""
将 JSON 数据转换为 node-link 格式的字典
@@ -299,7 +335,7 @@ def convert_json_to_node_link(
def convert_json_to_workflow_list(
data: Union[str, PathLike, Dict[str, Any]],
workstation_name: str = DEFAULT_WORKSTATION,
workstation_name: str = "PRCXi",
) -> List[Dict[str, Any]]:
"""
将 JSON 数据转换为工作流列表格式
@@ -313,3 +349,8 @@ def convert_json_to_workflow_list(
"""
graph = convert_from_json(data, workstation_name)
return graph.to_dict()
# 为了向后兼容,保留下划线前缀的别名
_normalize_steps = normalize_steps
_normalize_labware = normalize_labware

View File

@@ -1,356 +0,0 @@
"""
JSON 工作流转换模块
提供从多种 JSON 格式转换为统一工作流格式的功能。
支持的格式:
1. workflow/reagent 格式
2. steps_info/labware_info 格式
"""
import json
from os import PathLike
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union
from unilabos.workflow.common import WorkflowGraph, build_protocol_graph
from unilabos.registry.registry import lab_registry
def get_action_handles(resource_name: str, template_name: str) -> Dict[str, List[str]]:
"""
从 registry 获取指定设备和动作的 handles 配置
Args:
resource_name: 设备资源名称,如 "liquid_handler.prcxi"
template_name: 动作模板名称,如 "transfer_liquid"
Returns:
包含 source 和 target handler_keys 的字典:
{"source": ["sources_out", "targets_out", ...], "target": ["sources", "targets", ...]}
"""
result = {"source": [], "target": []}
device_info = lab_registry.device_type_registry.get(resource_name, {})
if not device_info:
return result
action_mappings = device_info.get("class", {}).get("action_value_mappings", {})
action_config = action_mappings.get(template_name, {})
handles = action_config.get("handles", {})
if isinstance(handles, dict):
# 处理 input handles (作为 target)
for handle in handles.get("input", []):
handler_key = handle.get("handler_key", "")
if handler_key:
result["source"].append(handler_key)
# 处理 output handles (作为 source)
for handle in handles.get("output", []):
handler_key = handle.get("handler_key", "")
if handler_key:
result["target"].append(handler_key)
return result
def validate_workflow_handles(graph: WorkflowGraph) -> Tuple[bool, List[str]]:
"""
校验工作流图中所有边的句柄配置是否正确
Args:
graph: 工作流图对象
Returns:
(is_valid, errors): 是否有效,错误信息列表
"""
errors = []
nodes = graph.nodes
for edge in graph.edges:
left_uuid = edge.get("source")
right_uuid = edge.get("target")
# target_handle_key是target, right的输入节点入节点
# source_handle_key是source, left的输出节点出节点
right_source_conn_key = edge.get("target_handle_key", "")
left_target_conn_key = edge.get("source_handle_key", "")
# 获取源节点和目标节点信息
left_node = nodes.get(left_uuid, {})
right_node = nodes.get(right_uuid, {})
left_res_name = left_node.get("resource_name", "")
left_template_name = left_node.get("template_name", "")
right_res_name = right_node.get("resource_name", "")
right_template_name = right_node.get("template_name", "")
# 获取源节点的 output handles
left_node_handles = get_action_handles(left_res_name, left_template_name)
target_valid_keys = left_node_handles.get("target", [])
target_valid_keys.append("ready")
# 获取目标节点的 input handles
right_node_handles = get_action_handles(right_res_name, right_template_name)
source_valid_keys = right_node_handles.get("source", [])
source_valid_keys.append("ready")
# 如果节点配置了 output handles则 source_port 必须有效
if not right_source_conn_key:
node_name = left_node.get("name", left_uuid[:8])
errors.append(f"源节点 '{node_name}' 的 source_handle_key 为空," f"应设置为: {source_valid_keys}")
elif right_source_conn_key not in source_valid_keys:
node_name = left_node.get("name", left_uuid[:8])
errors.append(
f"源节点 '{node_name}' 的 source 端点 '{right_source_conn_key}' 不存在," f"支持的端点: {source_valid_keys}"
)
# 如果节点配置了 input handles则 target_port 必须有效
if not left_target_conn_key:
node_name = right_node.get("name", right_uuid[:8])
errors.append(f"目标节点 '{node_name}' 的 target_handle_key 为空," f"应设置为: {target_valid_keys}")
elif left_target_conn_key not in target_valid_keys:
node_name = right_node.get("name", right_uuid[:8])
errors.append(
f"目标节点 '{node_name}' 的 target 端点 '{left_target_conn_key}' 不存在,"
f"支持的端点: {target_valid_keys}"
)
return len(errors) == 0, errors
# action 到 resource_name 的映射
ACTION_RESOURCE_MAPPING: Dict[str, str] = {
# 生物实验操作
"transfer_liquid": "liquid_handler.prcxi",
"transfer": "liquid_handler.prcxi",
"incubation": "incubator.prcxi",
"move_labware": "labware_mover.prcxi",
"oscillation": "shaker.prcxi",
# 有机化学操作
"HeatChillToTemp": "heatchill.chemputer",
"StopHeatChill": "heatchill.chemputer",
"StartHeatChill": "heatchill.chemputer",
"HeatChill": "heatchill.chemputer",
"Dissolve": "stirrer.chemputer",
"Transfer": "liquid_handler.chemputer",
"Evaporate": "rotavap.chemputer",
"Recrystallize": "reactor.chemputer",
"Filter": "filter.chemputer",
"Dry": "dryer.chemputer",
"Add": "liquid_handler.chemputer",
}
def normalize_steps(data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""
将不同格式的步骤数据规范化为统一格式
支持的输入格式:
- action + parameters
- action + action_args
- operation + parameters
Args:
data: 原始步骤数据列表
Returns:
规范化后的步骤列表,格式为 [{"action": str, "parameters": dict, "description": str?, "step_number": int?}, ...]
"""
normalized = []
for idx, step in enumerate(data):
# 获取动作名称(支持 action 或 operation 字段)
action = step.get("action") or step.get("operation")
if not action:
continue
# 获取参数(支持 parameters 或 action_args 字段)
raw_params = step.get("parameters") or step.get("action_args") or {}
params = dict(raw_params)
# 规范化 source/target -> sources/targets
if "source" in raw_params and "sources" not in raw_params:
params["sources"] = raw_params["source"]
if "target" in raw_params and "targets" not in raw_params:
params["targets"] = raw_params["target"]
# 获取描述(支持 description 或 purpose 字段)
description = step.get("description") or step.get("purpose")
# 获取步骤编号(优先使用原始数据中的 step_number否则使用索引+1
step_number = step.get("step_number", idx + 1)
step_dict = {"action": action, "parameters": params, "step_number": step_number}
if description:
step_dict["description"] = description
normalized.append(step_dict)
return normalized
def normalize_labware(data: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
"""
将不同格式的 labware 数据规范化为统一的字典格式
支持的输入格式:
- reagent_name + material_name + positions
- name + labware + slot
Args:
data: 原始 labware 数据列表
Returns:
规范化后的 labware 字典,格式为 {name: {"slot": int, "labware": str, "well": list, "type": str, "role": str, "name": str}, ...}
"""
labware = {}
for item in data:
# 获取 key 名称(优先使用 reagent_name其次是 material_name 或 name
reagent_name = item.get("reagent_name")
key = reagent_name or item.get("material_name") or item.get("name")
if not key:
continue
key = str(key)
# 处理重复 key自动添加后缀
idx = 1
original_key = key
while key in labware:
idx += 1
key = f"{original_key}_{idx}"
labware[key] = {
"slot": item.get("positions") or item.get("slot"),
"labware": item.get("material_name") or item.get("labware"),
"well": item.get("well", []),
"type": item.get("type", "reagent"),
"role": item.get("role", ""),
"name": key,
}
return labware
def convert_from_json(
data: Union[str, PathLike, Dict[str, Any]],
workstation_name: str = "PRCXi",
validate: bool = True,
) -> WorkflowGraph:
"""
从 JSON 数据或文件转换为 WorkflowGraph
支持的 JSON 格式:
1. {"workflow": [...], "reagent": {...}} - 直接格式
2. {"steps_info": [...], "labware_info": [...]} - 需要规范化的格式
Args:
data: JSON 文件路径、字典数据、或 JSON 字符串
workstation_name: 工作站名称,默认 "PRCXi"
validate: 是否校验句柄配置,默认 True
Returns:
WorkflowGraph: 构建好的工作流图
Raises:
ValueError: 不支持的 JSON 格式 或 句柄校验失败
FileNotFoundError: 文件不存在
json.JSONDecodeError: JSON 解析失败
"""
# 处理输入数据
if isinstance(data, (str, PathLike)):
path = Path(data)
if path.exists():
with path.open("r", encoding="utf-8") as fp:
json_data = json.load(fp)
elif isinstance(data, str):
# 尝试作为 JSON 字符串解析
json_data = json.loads(data)
else:
raise FileNotFoundError(f"文件不存在: {data}")
elif isinstance(data, dict):
json_data = data
else:
raise TypeError(f"不支持的数据类型: {type(data)}")
# 根据格式解析数据
if "workflow" in json_data and "reagent" in json_data:
# 格式1: workflow/reagent已经是规范格式
protocol_steps = json_data["workflow"]
labware_info = json_data["reagent"]
elif "steps_info" in json_data and "labware_info" in json_data:
# 格式2: steps_info/labware_info需要规范化
protocol_steps = normalize_steps(json_data["steps_info"])
labware_info = normalize_labware(json_data["labware_info"])
elif "steps" in json_data and "labware" in json_data:
# 格式3: steps/labware另一种常见格式
protocol_steps = normalize_steps(json_data["steps"])
if isinstance(json_data["labware"], list):
labware_info = normalize_labware(json_data["labware"])
else:
labware_info = json_data["labware"]
else:
raise ValueError(
"不支持的 JSON 格式。支持的格式:\n"
"1. {'workflow': [...], 'reagent': {...}}\n"
"2. {'steps_info': [...], 'labware_info': [...]}\n"
"3. {'steps': [...], 'labware': [...]}"
)
# 构建工作流图
graph = build_protocol_graph(
labware_info=labware_info,
protocol_steps=protocol_steps,
workstation_name=workstation_name,
action_resource_mapping=ACTION_RESOURCE_MAPPING,
)
# 校验句柄配置
if validate:
is_valid, errors = validate_workflow_handles(graph)
if not is_valid:
import warnings
for error in errors:
warnings.warn(f"句柄校验警告: {error}")
return graph
def convert_json_to_node_link(
data: Union[str, PathLike, Dict[str, Any]],
workstation_name: str = "PRCXi",
) -> Dict[str, Any]:
"""
将 JSON 数据转换为 node-link 格式的字典
Args:
data: JSON 文件路径、字典数据、或 JSON 字符串
workstation_name: 工作站名称,默认 "PRCXi"
Returns:
Dict: node-link 格式的工作流数据
"""
graph = convert_from_json(data, workstation_name)
return graph.to_node_link_dict()
def convert_json_to_workflow_list(
data: Union[str, PathLike, Dict[str, Any]],
workstation_name: str = "PRCXi",
) -> List[Dict[str, Any]]:
"""
将 JSON 数据转换为工作流列表格式
Args:
data: JSON 文件路径、字典数据、或 JSON 字符串
workstation_name: 工作站名称,默认 "PRCXi"
Returns:
List: 工作流节点列表
"""
graph = convert_from_json(data, workstation_name)
return graph.to_dict()
# 为了向后兼容,保留下划线前缀的别名
_normalize_steps = normalize_steps
_normalize_labware = normalize_labware

View File

@@ -2,7 +2,7 @@
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<package format="3">
<name>unilabos_msgs</name>
<version>0.10.17</version>
<version>0.10.15</version>
<description>ROS2 Messages package for unilabos devices</description>
<maintainer email="changjh@pku.edu.cn">Junhan Chang</maintainer>
<maintainer email="18435084+Xuwznln@users.noreply.github.com">Xuwznln</maintainer>