Mirror of https://github.com/dptech-corp/Uni-Lab-OS.git (synced 2026-02-07 23:45:10 +00:00)

Compare commits: 88 commits, 6f600b4fc7 ... prcix9320
Commits in this comparison (SHA1 only):

f6d46e669d, abf5555e37, 4f7d431c0b, 341a1b537c, 957fb41a6f, 26271bcab8, e4d915c59c, 11a38d4558, 84a8223173, e8d1263488, 380b39100d, 56eb7e2ab4, 23ce145f74, b0da149252, 07c9e6f0fe, ccec6b9d77, dadfdf3d8d, aeeb36d075, 3478bfd7ed, 400bb073d4, 3f63c36505, 0ae94f7f3c, 7eacae6442, f7d2cb4b9e, bf980d7248, 27c0544bfc, d48e77c9ae, e70a5bea66, 467d75dc03, 9feeb0c430, b2f26ffb28, 4b0d1553e9, 67ddee2ab2, 1bcdad9448, 039c96fe01, e1555d10a0, f2a96b2041, 329349639e, e4cc111523, d245ceef1b, 6db7fbd721, ab05b858e1, 43e4c71a8e, d6910da57d, 2cf58ca452, fd73bb7dcb, a02cecfd18, d6accc3f1c, 39dc443399, 37b1fca962, 216f19fb62, d5b4f07406, 470d7283e4, 03f7f44c77, ec7ca6a1fe, 4c8022ee95, ad21644db0, 9dfd58e9af, 31c9f9a172, 02cd8de4c5, a66603ec1c, ec015e16cd, 965bf36e8d, aacf3497e0, 657f952e7a, 0165590290, daea1ab54d, 93cb307396, 1c312772ae, bad1db5094, f26eb69eca, 12c0770c92, 3d2d428a96, 78bf57f590, e227cddab3, f2b993643f, 2e14bf197c, 66c18c080a, a1c34f138e, 75bb5ec553, bb95c89829, 394c140830, e6d8d41183, 847a300af3, a201d7c307, 3433766bc5, 7e9e93b29c, 9e1e6da505
.conda/base/recipe.yaml (new file, 60 lines):

```yaml
# unilabos: Production package (depends on unilabos-env + pip unilabos)
# For production deployment

package:
  name: unilabos
  version: 0.10.17

source:
  path: ../../unilabos
  target_directory: unilabos

build:
  python:
    entry_points:
      - unilab = unilabos.app.main:main
  script:
    - set PIP_NO_INDEX=
    - if: win
      then:
        - copy %RECIPE_DIR%\..\..\MANIFEST.in %SRC_DIR%
        - copy %RECIPE_DIR%\..\..\setup.cfg %SRC_DIR%
        - copy %RECIPE_DIR%\..\..\setup.py %SRC_DIR%
        - pip install %SRC_DIR%
    - if: unix
      then:
        - cp $RECIPE_DIR/../../MANIFEST.in $SRC_DIR
        - cp $RECIPE_DIR/../../setup.cfg $SRC_DIR
        - cp $RECIPE_DIR/../../setup.py $SRC_DIR
        - pip install $SRC_DIR

requirements:
  host:
    - python ==3.11.14
    - pip
    - setuptools
    - zstd
    - zstandard
  run:
    - zstd
    - zstandard
    - networkx
    - typing_extensions
    - websockets
    - pint
    - fastapi
    - jinja2
    - requests
    - uvicorn
    - opcua  # [not osx]
    - pyserial
    - pandas
    - pymodbus
    - matplotlib
    - pylibftdi
    - uni-lab::unilabos-env ==0.10.17

about:
  repository: https://github.com/deepmodeling/Uni-Lab-OS
  license: GPL-3.0-only
  description: "UniLabOS - Production package with minimal ROS2 dependencies"
```
.conda/environment/recipe.yaml (new file, 39 lines):

```yaml
# unilabos-env: conda environment dependencies (ROS2 + conda packages)

package:
  name: unilabos-env
  version: 0.10.17

build:
  noarch: generic

requirements:
  run:
    # Python
    - zstd
    - zstandard
    - conda-forge::python ==3.11.14
    - conda-forge::opencv
    # ROS2 dependencies (from ci-check.yml)
    - robostack-staging::ros-humble-ros-core
    - robostack-staging::ros-humble-action-msgs
    - robostack-staging::ros-humble-std-msgs
    - robostack-staging::ros-humble-geometry-msgs
    - robostack-staging::ros-humble-control-msgs
    - robostack-staging::ros-humble-nav2-msgs
    - robostack-staging::ros-humble-cv-bridge
    - robostack-staging::ros-humble-vision-opencv
    - robostack-staging::ros-humble-tf-transformations
    - robostack-staging::ros-humble-moveit-msgs
    - robostack-staging::ros-humble-tf2-ros
    - robostack-staging::ros-humble-tf2-ros-py
    - conda-forge::transforms3d
    - conda-forge::uv

    # UniLabOS custom messages
    - uni-lab::ros-humble-unilabos-msgs

about:
  repository: https://github.com/deepmodeling/Uni-Lab-OS
  license: GPL-3.0-only
  description: "UniLabOS Environment - ROS2 and conda dependencies"
```
.conda/full/recipe.yaml (new file, 42 lines):

```yaml
# unilabos-full: Full package with all features
# Depends on unilabos + complete ROS2 desktop + dev tools

package:
  name: unilabos-full
  version: 0.10.17

build:
  noarch: generic

requirements:
  run:
    # Base unilabos package (includes unilabos-env)
    - uni-lab::unilabos ==0.10.17
    # Documentation tools
    - sphinx
    - sphinx_rtd_theme
    # Web UI
    - gradio
    - flask
    # Interactive development
    - ipython
    - jupyter
    - jupyros
    - colcon-common-extensions
    # ROS2 full desktop (includes rviz2, gazebo, etc.)
    - robostack-staging::ros-humble-desktop-full
    # Navigation and motion control
    - ros-humble-navigation2
    - ros-humble-ros2-control
    - ros-humble-robot-state-publisher
    - ros-humble-joint-state-publisher
    # MoveIt motion planning
    - ros-humble-moveit
    - ros-humble-moveit-servo
    # Simulation
    - ros-humble-simulation

about:
  repository: https://github.com/deepmodeling/Uni-Lab-OS
  license: GPL-3.0-only
  description: "UniLabOS Full - Complete package with ROS2 Desktop, MoveIt, Navigation2, Gazebo, Jupyter"
```
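These three recipes are built in CI with rattler-build (see the workflow changes below). A minimal local sketch of the same builds, assuming `rattler-build` is installed and the commands are run from the repository root:

```bash
# Build the environment package first; the other two recipes can then resolve it from ./output
rattler-build build -r .conda/environment/recipe.yaml -c uni-lab -c robostack-staging -c conda-forge

# Base package: pip-installs unilabos and pins uni-lab::unilabos-env ==0.10.17
rattler-build build -r .conda/base/recipe.yaml -c uni-lab -c robostack-staging -c conda-forge --channel ./output

# Full package, only when the heavy ROS2 desktop stack is actually needed
rattler-build build -r .conda/full/recipe.yaml -c uni-lab -c robostack-staging -c conda-forge --channel ./output
```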
Deleted file (@@ -1,91 +0,0 @@), the previous all-in-one recipe:

```yaml
package:
  name: unilabos
  version: 0.10.15

source:
  path: ../unilabos
  target_directory: unilabos

build:
  python:
    entry_points:
      - unilab = unilabos.app.main:main
  script:
    - set PIP_NO_INDEX=
    - if: win
      then:
        - copy %RECIPE_DIR%\..\MANIFEST.in %SRC_DIR%
        - copy %RECIPE_DIR%\..\setup.cfg %SRC_DIR%
        - copy %RECIPE_DIR%\..\setup.py %SRC_DIR%
        - call %PYTHON% -m pip install %SRC_DIR%
    - if: unix
      then:
        - cp $RECIPE_DIR/../MANIFEST.in $SRC_DIR
        - cp $RECIPE_DIR/../setup.cfg $SRC_DIR
        - cp $RECIPE_DIR/../setup.py $SRC_DIR
        - $PYTHON -m pip install $SRC_DIR

requirements:
  host:
    - python ==3.11.11
    - pip
    - setuptools
    - zstd
    - zstandard
  run:
    - conda-forge::python ==3.11.11
    - compilers
    - cmake
    - zstd
    - zstandard
    - ninja
    - if: unix
      then:
        - make
    - sphinx
    - sphinx_rtd_theme
    - numpy
    - scipy
    - pandas
    - networkx
    - matplotlib
    - pint
    - pyserial
    - pyusb
    - pylibftdi
    - pymodbus
    - python-can
    - pyvisa
    - opencv
    - pydantic
    - fastapi
    - uvicorn
    - gradio
    - flask
    - websockets
    - ipython
    - jupyter
    - jupyros
    - colcon-common-extensions
    - robostack-staging::ros-humble-desktop-full
    - robostack-staging::ros-humble-control-msgs
    - robostack-staging::ros-humble-sensor-msgs
    - robostack-staging::ros-humble-trajectory-msgs
    - ros-humble-navigation2
    - ros-humble-ros2-control
    - ros-humble-robot-state-publisher
    - ros-humble-joint-state-publisher
    - ros-humble-rosbridge-server
    - ros-humble-cv-bridge
    - ros-humble-tf2
    - ros-humble-moveit
    - ros-humble-moveit-servo
    - ros-humble-simulation
    - ros-humble-tf-transformations
    - transforms3d
    - uni-lab::ros-humble-unilabos-msgs

about:
  repository: https://github.com/deepmodeling/Uni-Lab-OS
  license: GPL-3.0-only
  description: "Uni-Lab-OS"
```
.github/workflows/ci-check.yml (new file, vendored, 67 lines):

```yaml
name: CI Check

on:
  push:
    branches: [main, dev]
  pull_request:
    branches: [main, dev]

jobs:
  registry-check:
    runs-on: windows-latest

    env:
      # Fix Unicode encoding issue on Windows runner (cp1252 -> utf-8)
      PYTHONIOENCODING: utf-8
      PYTHONUTF8: 1

    defaults:
      run:
        shell: cmd

    steps:
      - uses: actions/checkout@v6
        with:
          fetch-depth: 0

      - name: Setup Miniforge
        uses: conda-incubator/setup-miniconda@v3
        with:
          miniforge-version: latest
          use-mamba: true
          channels: robostack-staging,conda-forge,uni-lab
          channel-priority: flexible
          activate-environment: check-env
          auto-update-conda: false
          show-channel-urls: true

      - name: Install ROS dependencies, uv and unilabos-msgs
        run: |
          echo Installing ROS dependencies...
          mamba install -n check-env conda-forge::uv conda-forge::opencv robostack-staging::ros-humble-ros-core robostack-staging::ros-humble-action-msgs robostack-staging::ros-humble-std-msgs robostack-staging::ros-humble-geometry-msgs robostack-staging::ros-humble-control-msgs robostack-staging::ros-humble-nav2-msgs uni-lab::ros-humble-unilabos-msgs robostack-staging::ros-humble-cv-bridge robostack-staging::ros-humble-vision-opencv robostack-staging::ros-humble-tf-transformations robostack-staging::ros-humble-moveit-msgs robostack-staging::ros-humble-tf2-ros robostack-staging::ros-humble-tf2-ros-py conda-forge::transforms3d -c robostack-staging -c conda-forge -c uni-lab -y

      - name: Install pip dependencies and unilabos
        run: |
          call conda activate check-env
          echo Installing pip dependencies...
          uv pip install -r unilabos/utils/requirements.txt
          uv pip install pywinauto git+https://github.com/Xuwznln/pylabrobot.git
          uv pip uninstall enum34 || echo enum34 not installed, skipping
          uv pip install .

      - name: Run check mode (complete_registry)
        run: |
          call conda activate check-env
          echo Running check mode...
          python -m unilabos --check_mode --skip_env_check

      - name: Check for uncommitted changes
        shell: bash
        run: |
          if ! git diff --exit-code; then
            echo "::error::检测到文件变化!请先在本地运行 'python -m unilabos --complete_registry' 并提交变更"
            echo "变化的文件:"
            git diff --name-only
            exit 1
          fi
          echo "检查通过:无文件变化"
```
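The last two steps of this workflow can be reproduced locally before pushing. A rough equivalent, assuming an activated environment in which unilabos and its requirements are already installed:

```bash
# Regenerate the registry, then confirm nothing is left uncommitted
python -m unilabos --complete_registry
python -m unilabos --check_mode --skip_env_check
git diff --exit-code || echo "registry files changed - commit them before pushing"
```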
.github/workflows/conda-pack-build.yml (vendored, 43 changed lines):

```diff
@@ -13,6 +13,11 @@ on:
         required: false
         default: 'win-64'
         type: string
+      build_full:
+        description: '是否构建完整版 unilabos-full (默认构建轻量版 unilabos)'
+        required: false
+        default: false
+        type: boolean
 
 jobs:
   build-conda-pack:
@@ -57,7 +62,7 @@ jobs:
             echo "should_build=false" >> $GITHUB_OUTPUT
           fi
 
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6
         if: steps.should_build.outputs.should_build == 'true'
         with:
          ref: ${{ github.event.inputs.branch }}
@@ -69,7 +74,7 @@ jobs:
         with:
           miniforge-version: latest
           use-mamba: true
-          python-version: '3.11.11'
+          python-version: '3.11.14'
           channels: conda-forge,robostack-staging,uni-lab,defaults
           channel-priority: flexible
           activate-environment: unilab
@@ -81,7 +86,14 @@ jobs:
         run: |
           echo Installing unilabos and dependencies to unilab environment...
           echo Using mamba for faster and more reliable dependency resolution...
-          mamba install -n unilab uni-lab::unilabos conda-pack -c uni-lab -c robostack-staging -c conda-forge -y
+          echo Build full: ${{ github.event.inputs.build_full }}
+          if "${{ github.event.inputs.build_full }}"=="true" (
+            echo Installing unilabos-full ^(complete package^)...
+            mamba install -n unilab uni-lab::unilabos-full conda-pack -c uni-lab -c robostack-staging -c conda-forge -y
+          ) else (
+            echo Installing unilabos ^(minimal package^)...
+            mamba install -n unilab uni-lab::unilabos conda-pack -c uni-lab -c robostack-staging -c conda-forge -y
+          )
 
       - name: Install conda-pack, unilabos and dependencies (Unix)
         if: steps.should_build.outputs.should_build == 'true' && matrix.platform != 'win-64'
@@ -89,7 +101,14 @@ jobs:
         run: |
           echo "Installing unilabos and dependencies to unilab environment..."
           echo "Using mamba for faster and more reliable dependency resolution..."
-          mamba install -n unilab uni-lab::unilabos conda-pack -c uni-lab -c robostack-staging -c conda-forge -y
+          echo "Build full: ${{ github.event.inputs.build_full }}"
+          if [[ "${{ github.event.inputs.build_full }}" == "true" ]]; then
+            echo "Installing unilabos-full (complete package)..."
+            mamba install -n unilab uni-lab::unilabos-full conda-pack -c uni-lab -c robostack-staging -c conda-forge -y
+          else
+            echo "Installing unilabos (minimal package)..."
+            mamba install -n unilab uni-lab::unilabos conda-pack -c uni-lab -c robostack-staging -c conda-forge -y
+          fi
 
       - name: Get latest ros-humble-unilabos-msgs version (Windows)
         if: steps.should_build.outputs.should_build == 'true' && matrix.platform == 'win-64'
@@ -293,7 +312,7 @@ jobs:
 
       - name: Upload distribution package
         if: steps.should_build.outputs.should_build == 'true'
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: unilab-pack-${{ matrix.platform }}-${{ github.event.inputs.branch }}
           path: dist-package/
@@ -308,7 +327,12 @@ jobs:
           echo ==========================================
           echo Platform: ${{ matrix.platform }}
           echo Branch: ${{ github.event.inputs.branch }}
-          echo Python version: 3.11.11
+          echo Python version: 3.11.14
+          if "${{ github.event.inputs.build_full }}"=="true" (
+            echo Package: unilabos-full ^(complete^)
+          ) else (
+            echo Package: unilabos ^(minimal^)
+          )
           echo.
           echo Distribution package contents:
           dir dist-package
@@ -328,7 +352,12 @@ jobs:
           echo "=========================================="
           echo "Platform: ${{ matrix.platform }}"
           echo "Branch: ${{ github.event.inputs.branch }}"
-          echo "Python version: 3.11.11"
+          echo "Python version: 3.11.14"
+          if [[ "${{ github.event.inputs.build_full }}" == "true" ]]; then
+            echo "Package: unilabos-full (complete)"
+          else
+            echo "Package: unilabos (minimal)"
+          fi
           echo ""
           echo "Distribution package contents:"
           ls -lh dist-package/
```
.github/workflows/deploy-docs.yml (vendored, 37 changed lines):

```diff
@@ -1,10 +1,12 @@
 name: Deploy Docs
 
 on:
-  push:
-    branches: [main]
-  pull_request:
+  # 在 CI Check 成功后自动触发(仅 main 分支)
+  workflow_run:
+    workflows: ["CI Check"]
+    types: [completed]
     branches: [main]
+  # 手动触发
   workflow_dispatch:
     inputs:
       branch:
@@ -33,12 +35,19 @@ concurrency:
 jobs:
   # Build documentation
   build:
+    # 只在以下情况运行:
+    # 1. workflow_run 触发且 CI Check 成功
+    # 2. 手动触发
+    if: |
+      github.event_name == 'workflow_dispatch' ||
+      (github.event_name == 'workflow_run' && github.event.workflow_run.conclusion == 'success')
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
         with:
-          ref: ${{ github.event.inputs.branch || github.ref }}
+          # workflow_run 时使用触发工作流的分支,手动触发时使用输入的分支
+          ref: ${{ github.event.workflow_run.head_branch || github.event.inputs.branch || github.ref }}
           fetch-depth: 0
 
       - name: Setup Miniforge (with mamba)
@@ -46,7 +55,7 @@ jobs:
         with:
           miniforge-version: latest
           use-mamba: true
-          python-version: '3.11.11'
+          python-version: '3.11.14'
           channels: conda-forge,robostack-staging,uni-lab,defaults
           channel-priority: flexible
           activate-environment: unilab
@@ -75,8 +84,10 @@ jobs:
 
       - name: Setup Pages
         id: pages
-        uses: actions/configure-pages@v4
-        if: github.ref == 'refs/heads/main' || (github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true')
+        uses: actions/configure-pages@v5
+        if: |
+          github.event.workflow_run.head_branch == 'main' ||
+          (github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true')
 
       - name: Build Sphinx documentation
         run: |
@@ -94,14 +105,18 @@ jobs:
           test -f docs/_build/html/index.html && echo "✓ index.html exists" || echo "✗ index.html missing"
 
       - name: Upload build artifacts
-        uses: actions/upload-pages-artifact@v3
-        if: github.ref == 'refs/heads/main' || (github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true')
+        uses: actions/upload-pages-artifact@v4
+        if: |
+          github.event.workflow_run.head_branch == 'main' ||
+          (github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true')
         with:
           path: docs/_build/html
 
   # Deploy to GitHub Pages
   deploy:
-    if: github.ref == 'refs/heads/main' || (github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true')
+    if: |
+      github.event.workflow_run.head_branch == 'main' ||
+      (github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true')
     environment:
       name: github-pages
       url: ${{ steps.deployment.outputs.page_url }}
```
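For reference, a local build that mirrors what this workflow verifies and uploads (a sketch only, assuming a standard Sphinx layout with sources under docs/ and the sphinx plus sphinx_rtd_theme packages from the recipes above installed):

```bash
# Build the HTML docs into the path the workflow checks and publishes
sphinx-build -b html docs docs/_build/html
test -f docs/_build/html/index.html && echo "index.html exists"
```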
.github/workflows/multi-platform-build.yml (vendored, 46 changed lines):

```diff
@@ -1,11 +1,16 @@
 name: Multi-Platform Conda Build
 
 on:
+  # 在 CI Check 工作流完成后触发(仅限 main/dev 分支)
+  workflow_run:
+    workflows: ["CI Check"]
+    types:
+      - completed
+    branches: [main, dev]
+  # 支持 tag 推送(不依赖 CI Check)
   push:
-    branches: [main, dev]
     tags: ['v*']
-  pull_request:
-    branches: [main, dev]
+  # 手动触发
   workflow_dispatch:
     inputs:
       platforms:
@@ -17,9 +22,37 @@ on:
         required: false
         default: false
         type: boolean
+      skip_ci_check:
+        description: '跳过等待 CI Check (手动触发时可选)'
+        required: false
+        default: false
+        type: boolean
 
 jobs:
+  # 等待 CI Check 完成的 job (仅用于 workflow_run 触发)
+  wait-for-ci:
+    runs-on: ubuntu-latest
+    if: github.event_name == 'workflow_run'
+    outputs:
+      should_continue: ${{ steps.check.outputs.should_continue }}
+    steps:
+      - name: Check CI status
+        id: check
+        run: |
+          if [[ "${{ github.event.workflow_run.conclusion }}" == "success" ]]; then
+            echo "should_continue=true" >> $GITHUB_OUTPUT
+            echo "CI Check passed, proceeding with build"
+          else
+            echo "should_continue=false" >> $GITHUB_OUTPUT
+            echo "CI Check did not succeed (status: ${{ github.event.workflow_run.conclusion }}), skipping build"
+          fi
+
   build:
+    needs: [wait-for-ci]
+    # 运行条件:workflow_run 触发且 CI 成功,或者其他触发方式
+    if: |
+      always() &&
+      (needs.wait-for-ci.result == 'skipped' || needs.wait-for-ci.outputs.should_continue == 'true')
     strategy:
       fail-fast: false
       matrix:
@@ -44,8 +77,10 @@ jobs:
         shell: bash -l {0}
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6
         with:
+          # 如果是 workflow_run 触发,使用触发 CI Check 的 commit
+          ref: ${{ github.event.workflow_run.head_sha || github.ref }}
           fetch-depth: 0
 
       - name: Check if platform should be built
@@ -69,7 +104,6 @@ jobs:
           channels: conda-forge,robostack-staging,defaults
           channel-priority: strict
           activate-environment: build-env
-          auto-activate-base: false
           auto-update-conda: false
           show-channel-urls: true
 
@@ -115,7 +149,7 @@ jobs:
 
       - name: Upload conda package artifacts
         if: steps.should_build.outputs.should_build == 'true'
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: conda-package-${{ matrix.platform }}
           path: conda-packages-temp
```
.github/workflows/unilabos-conda-build.yml (vendored, 113 changed lines):

```diff
@@ -1,25 +1,62 @@
 name: UniLabOS Conda Build
 
 on:
+  # 在 CI Check 成功后自动触发
+  workflow_run:
+    workflows: ["CI Check"]
+    types: [completed]
+    branches: [main, dev]
+  # 标签推送时直接触发(发布版本)
   push:
-    branches: [main, dev]
     tags: ['v*']
-  pull_request:
-    branches: [main, dev]
+  # 手动触发
   workflow_dispatch:
     inputs:
       platforms:
         description: '选择构建平台 (逗号分隔): linux-64, osx-64, osx-arm64, win-64'
         required: false
         default: 'linux-64'
+      build_full:
+        description: '是否构建 unilabos-full 完整包 (默认只构建 unilabos 基础包)'
+        required: false
+        default: false
+        type: boolean
       upload_to_anaconda:
         description: '是否上传到Anaconda.org'
         required: false
         default: false
         type: boolean
+      skip_ci_check:
+        description: '跳过等待 CI Check (手动触发时可选)'
+        required: false
+        default: false
+        type: boolean
 
 jobs:
+  # 等待 CI Check 完成的 job (仅用于 workflow_run 触发)
+  wait-for-ci:
+    runs-on: ubuntu-latest
+    if: github.event_name == 'workflow_run'
+    outputs:
+      should_continue: ${{ steps.check.outputs.should_continue }}
+    steps:
+      - name: Check CI status
+        id: check
+        run: |
+          if [[ "${{ github.event.workflow_run.conclusion }}" == "success" ]]; then
+            echo "should_continue=true" >> $GITHUB_OUTPUT
+            echo "CI Check passed, proceeding with build"
+          else
+            echo "should_continue=false" >> $GITHUB_OUTPUT
+            echo "CI Check did not succeed (status: ${{ github.event.workflow_run.conclusion }}), skipping build"
+          fi
+
   build:
+    needs: [wait-for-ci]
+    # 运行条件:workflow_run 触发且 CI 成功,或者其他触发方式
+    if: |
+      always() &&
+      (needs.wait-for-ci.result == 'skipped' || needs.wait-for-ci.outputs.should_continue == 'true')
     strategy:
       fail-fast: false
       matrix:
@@ -40,8 +77,10 @@ jobs:
         shell: bash -l {0}
 
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6
         with:
+          # 如果是 workflow_run 触发,使用触发 CI Check 的 commit
+          ref: ${{ github.event.workflow_run.head_sha || github.ref }}
          fetch-depth: 0
 
       - name: Check if platform should be built
@@ -65,7 +104,6 @@ jobs:
          channels: conda-forge,robostack-staging,uni-lab,defaults
          channel-priority: strict
          activate-environment: build-env
-          auto-activate-base: false
          auto-update-conda: false
          show-channel-urls: true
 
@@ -81,12 +119,61 @@ jobs:
           conda list | grep -E "(rattler-build|anaconda-client)"
           echo "Platform: ${{ matrix.platform }}"
           echo "OS: ${{ matrix.os }}"
-          echo "Building UniLabOS package"
+          echo "Build full package: ${{ github.event.inputs.build_full || 'false' }}"
+          echo "Building packages:"
+          echo "  - unilabos-env (environment dependencies)"
+          echo "  - unilabos (with pip package)"
+          if [[ "${{ github.event.inputs.build_full }}" == "true" ]]; then
+            echo "  - unilabos-full (complete package)"
+          fi
 
-      - name: Build conda package
+      - name: Build unilabos-env (conda environment only, noarch)
         if: steps.should_build.outputs.should_build == 'true'
         run: |
-          rattler-build build -r .conda/recipe.yaml -c uni-lab -c robostack-staging -c conda-forge
+          echo "Building unilabos-env (conda environment dependencies)..."
+          rattler-build build -r .conda/environment/recipe.yaml -c uni-lab -c robostack-staging -c conda-forge
+
+      - name: Upload unilabos-env to Anaconda.org (if enabled)
+        if: steps.should_build.outputs.should_build == 'true' && github.event.inputs.upload_to_anaconda == 'true'
+        run: |
+          echo "Uploading unilabos-env to uni-lab organization..."
+          for package in $(find ./output -name "unilabos-env*.conda"); do
+            anaconda -t ${{ secrets.ANACONDA_API_TOKEN }} upload --user uni-lab --force "$package"
+          done
+
+      - name: Build unilabos (with pip package)
+        if: steps.should_build.outputs.should_build == 'true'
+        run: |
+          echo "Building unilabos package..."
+          # 如果已上传到 Anaconda,从 uni-lab channel 获取 unilabos-env;否则从本地 output 获取
+          rattler-build build -r .conda/base/recipe.yaml -c uni-lab -c robostack-staging -c conda-forge --channel ./output
+
+      - name: Upload unilabos to Anaconda.org (if enabled)
+        if: steps.should_build.outputs.should_build == 'true' && github.event.inputs.upload_to_anaconda == 'true'
+        run: |
+          echo "Uploading unilabos to uni-lab organization..."
+          for package in $(find ./output -name "unilabos-0*.conda" -o -name "unilabos-[0-9]*.conda"); do
+            anaconda -t ${{ secrets.ANACONDA_API_TOKEN }} upload --user uni-lab --force "$package"
+          done
+
+      - name: Build unilabos-full - Only when explicitly requested
+        if: |
+          steps.should_build.outputs.should_build == 'true' &&
+          github.event.inputs.build_full == 'true'
+        run: |
+          echo "Building unilabos-full package on ${{ matrix.platform }}..."
+          rattler-build build -r .conda/full/recipe.yaml -c uni-lab -c robostack-staging -c conda-forge --channel ./output
+
+      - name: Upload unilabos-full to Anaconda.org (if enabled)
+        if: |
+          steps.should_build.outputs.should_build == 'true' &&
+          github.event.inputs.build_full == 'true' &&
+          github.event.inputs.upload_to_anaconda == 'true'
+        run: |
+          echo "Uploading unilabos-full to uni-lab organization..."
+          for package in $(find ./output -name "unilabos-full*.conda"); do
+            anaconda -t ${{ secrets.ANACONDA_API_TOKEN }} upload --user uni-lab --force "$package"
+          done
 
       - name: List built packages
         if: steps.should_build.outputs.should_build == 'true'
@@ -108,17 +195,9 @@ jobs:
 
       - name: Upload conda package artifacts
         if: steps.should_build.outputs.should_build == 'true'
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: conda-package-unilabos-${{ matrix.platform }}
           path: conda-packages-temp
           if-no-files-found: warn
           retention-days: 30
-
-      - name: Upload to Anaconda.org (uni-lab organization)
-        if: github.event.inputs.upload_to_anaconda == 'true'
-        run: |
-          for package in $(find ./output -name "*.conda"); do
-            echo "Uploading $package to uni-lab organization..."
-            anaconda -t ${{ secrets.ANACONDA_API_TOKEN }} upload --user uni-lab --force "$package"
-          done
```
```diff
@@ -1,4 +1,5 @@
 recursive-include unilabos/test *
+recursive-include unilabos/utils *
 recursive-include unilabos/registry *.yaml
 recursive-include unilabos/app/web/static *
 recursive-include unilabos/app/web/templates *
```
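The added `recursive-include unilabos/utils *` line matters because `unilabos/utils/requirements.txt` is consumed after installation (the workflows and docs in this compare run `uv pip install -r unilabos/utils/requirements.txt`). A quick check that the file actually ships in the source distribution, assuming the `build` package is available:

```bash
python -m build --sdist
tar -tzf dist/unilabos-*.tar.gz | grep utils/requirements.txt
```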
README.md (38 changed lines):

````diff
@@ -31,26 +31,46 @@ Detailed documentation can be found at:
 
 ## Quick Start
 
-1. Setup Conda Environment
+### 1. Setup Conda Environment
 
-Uni-Lab-OS recommends using `mamba` for environment management:
+Uni-Lab-OS recommends using `mamba` for environment management. Choose the package that fits your needs:
+
+| Package | Use Case | Contents |
+|---------|----------|----------|
+| `unilabos` | **Recommended for most users** | Complete package, ready to use |
+| `unilabos-env` | Developers (editable install) | Environment only, install unilabos via pip |
+| `unilabos-full` | Simulation/Visualization | unilabos + ROS2 Desktop + Gazebo + MoveIt |
 
 ```bash
 # Create new environment
-mamba create -n unilab python=3.11.11
+mamba create -n unilab python=3.11.14
 mamba activate unilab
-mamba install -n unilab uni-lab::unilabos -c robostack-staging -c conda-forge
+
+# Option A: Standard installation (recommended for most users)
+mamba install uni-lab::unilabos -c robostack-staging -c conda-forge
+
+# Option B: For developers (editable mode development)
+mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge
+# Then install unilabos and dependencies:
+git clone https://github.com/deepmodeling/Uni-Lab-OS.git && cd Uni-Lab-OS
+pip install -e .
+uv pip install -r unilabos/utils/requirements.txt
+
+# Option C: Full installation (simulation/visualization)
+mamba install uni-lab::unilabos-full -c robostack-staging -c conda-forge
 ```
 
-2. Install Dev Uni-Lab-OS
+**When to use which?**
+- **unilabos**: Standard installation for production deployment and general usage (recommended)
+- **unilabos-env**: For developers who need `pip install -e .` editable mode, modify source code
+- **unilabos-full**: For simulation (Gazebo), visualization (rviz2), and Jupyter notebooks
+
+### 2. Clone Repository (Optional, for developers)
 
 ```bash
-# Clone the repository
+# Clone the repository (only needed for development or examples)
 git clone https://github.com/deepmodeling/Uni-Lab-OS.git
 cd Uni-Lab-OS
-
-# Install Uni-Lab-OS
-pip install .
 ```
 
 3. Start Uni-Lab System
````
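Whichever option is chosen, the install can be sanity-checked with a couple of one-liners (a sketch; the version check mirrors the verification commands added to the installation guide further down, and the CLI flag is an assumption):

```bash
python -c "import unilabos; print(unilabos.__version__)"
unilab --help   # 'unilab' is the entry point declared in the conda recipe
```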
README_zh.md (38 changed lines):

````diff
@@ -31,26 +31,46 @@ Uni-Lab-OS 是一个用于实验室自动化的综合平台,旨在连接和控
 
 ## 快速开始
 
-1. 配置 Conda 环境
+### 1. 配置 Conda 环境
 
-Uni-Lab-OS 建议使用 `mamba` 管理环境。根据您的操作系统选择适当的环境文件:
+Uni-Lab-OS 建议使用 `mamba` 管理环境。根据您的需求选择合适的安装包:
+
+| 安装包 | 适用场景 | 包含内容 |
+|--------|----------|----------|
+| `unilabos` | **推荐大多数用户** | 完整安装包,开箱即用 |
+| `unilabos-env` | 开发者(可编辑安装) | 仅环境依赖,通过 pip 安装 unilabos |
+| `unilabos-full` | 仿真/可视化 | unilabos + ROS2 桌面版 + Gazebo + MoveIt |
 
 ```bash
 # 创建新环境
-mamba create -n unilab python=3.11.11
+mamba create -n unilab python=3.11.14
 mamba activate unilab
-mamba install -n unilab uni-lab::unilabos -c robostack-staging -c conda-forge
+
+# 方案 A:标准安装(推荐大多数用户)
+mamba install uni-lab::unilabos -c robostack-staging -c conda-forge
+
+# 方案 B:开发者环境(可编辑模式开发)
+mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge
+# 然后安装 unilabos 和依赖:
+git clone https://github.com/deepmodeling/Uni-Lab-OS.git && cd Uni-Lab-OS
+pip install -e .
+uv pip install -r unilabos/utils/requirements.txt
+
+# 方案 C:完整安装(仿真/可视化)
+mamba install uni-lab::unilabos-full -c robostack-staging -c conda-forge
 ```
 
-2. 安装开发版 Uni-Lab-OS:
+**如何选择?**
+- **unilabos**:标准安装,适用于生产部署和日常使用(推荐)
+- **unilabos-env**:开发者使用,支持 `pip install -e .` 可编辑模式,可修改源代码
+- **unilabos-full**:需要仿真(Gazebo)、可视化(rviz2)或 Jupyter Notebook
+
+### 2. 克隆仓库(可选,供开发者使用)
 
 ```bash
-# 克隆仓库
+# 克隆仓库(仅开发或查看示例时需要)
 git clone https://github.com/deepmodeling/Uni-Lab-OS.git
 cd Uni-Lab-OS
-
-# 安装 Uni-Lab-OS
-pip install .
 ```
 
 3. 启动 Uni-Lab 系统
````
````diff
@@ -31,6 +31,14 @@
 
 详细的安装步骤请参考 [安装指南](installation.md)。
 
+**选择合适的安装包:**
+
+| 安装包 | 适用场景 | 包含组件 |
+|--------|----------|----------|
+| `unilabos` | **推荐大多数用户**,生产部署 | 完整安装包,开箱即用 |
+| `unilabos-env` | 开发者(可编辑安装) | 仅环境依赖,通过 pip 安装 unilabos |
+| `unilabos-full` | 仿真/可视化 | unilabos + 完整 ROS2 桌面版 + Gazebo + MoveIt |
+
 **关键步骤:**
 
 ```bash
@@ -38,15 +46,30 @@
 # 下载 Miniforge: https://github.com/conda-forge/miniforge/releases
 
 # 2. 创建 Conda 环境
-mamba create -n unilab python=3.11.11
+mamba create -n unilab python=3.11.14
 
 # 3. 激活环境
 mamba activate unilab
 
-# 4. 安装 Uni-Lab-OS
+# 4. 安装 Uni-Lab-OS(选择其一)
+
+# 方案 A:标准安装(推荐大多数用户)
 mamba install uni-lab::unilabos -c robostack-staging -c conda-forge
+
+# 方案 B:开发者环境(可编辑模式开发)
+mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge
+pip install -e /path/to/Uni-Lab-OS  # 可编辑安装
+uv pip install -r unilabos/utils/requirements.txt  # 安装 pip 依赖
+
+# 方案 C:完整版(仿真/可视化)
+mamba install uni-lab::unilabos-full -c robostack-staging -c conda-forge
 ```
 
+**选择建议:**
+- **日常使用/生产部署**:使用 `unilabos`(推荐),完整功能,开箱即用
+- **开发者**:使用 `unilabos-env` + `pip install -e .` + `uv pip install -r unilabos/utils/requirements.txt`,代码修改立即生效
+- **仿真/可视化**:使用 `unilabos-full`,含 Gazebo、rviz2、MoveIt
+
 #### 1.2 验证安装
 
 ```bash
@@ -416,6 +439,9 @@ unilab --ak your_ak --sk your_sk -g test/experiments/mock_devices/mock_all.json
 1. 访问 Web 界面,进入"仪器耗材"模块
 2. 在"仪器设备"区域找到并添加上述设备
 3. 在"物料耗材"区域找到并添加容器
+4. 在workstation中配置protocol_type包含PumpTransferProtocol
+
+
 
 
 
@@ -426,8 +452,9 @@
 **操作步骤:**
 
 1. 将两个 `container` 拖拽到 `workstation` 中
-2. 将 `virtual_transfer_pump` 拖拽到 `workstation` 中
-3. 在画布上连接它们(建立父子关系)
+2. 将 `virtual_multiway_valve` 拖拽到 `workstation` 中
+3. 将 `virtual_transfer_pump` 拖拽到 `workstation` 中
+4. 在画布上连接它们(建立父子关系)
 
 
 
@@ -768,7 +795,43 @@ Waiting for host service...
 
 详细的设备驱动编写指南请参考 [添加设备驱动](../developer_guide/add_device.md)。
 
-#### 9.1 为什么需要自定义设备?
+#### 9.1 开发环境准备
+
+**推荐使用 `unilabos-env` + `pip install -e .` + `uv pip install`** 进行设备开发:
+
+```bash
+# 1. 创建环境并安装 unilabos-env(ROS2 + conda 依赖 + uv)
+mamba create -n unilab python=3.11.14
+conda activate unilab
+mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge
+
+# 2. 克隆代码
+git clone https://github.com/deepmodeling/Uni-Lab-OS.git
+cd Uni-Lab-OS
+
+# 3. 以可编辑模式安装(推荐使用脚本,自动检测中文环境)
+python scripts/dev_install.py
+
+# 或手动安装:
+pip install -e .
+uv pip install -r unilabos/utils/requirements.txt
+```
+
+**为什么使用这种方式?**
+- `unilabos-env` 提供 ROS2 核心组件和 uv(通过 conda 安装,避免编译)
+- `unilabos/utils/requirements.txt` 包含所有运行时需要的 pip 依赖
+- `dev_install.py` 自动检测中文环境,中文系统自动使用清华镜像
+- 使用 `uv` 替代 `pip`,安装速度更快
+- 可编辑模式:代码修改**立即生效**,无需重新安装
+
+**如果安装失败或速度太慢**,可以手动执行(使用清华镜像):
+
+```bash
+pip install -e . -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
+uv pip install -r unilabos/utils/requirements.txt -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
+```
+
+#### 9.2 为什么需要自定义设备?
 
 Uni-Lab-OS 内置了常见设备,但您的实验室可能有特殊设备需要集成:
 
@@ -777,7 +840,7 @@ Uni-Lab-OS 内置了常见设备,但您的实验室可能有特殊设备需要
 - 特殊的实验流程
 - 第三方设备集成
 
-#### 9.2 创建 Python 包
+#### 9.3 创建 Python 包
 
 为了方便开发和管理,建议为您的实验室创建独立的 Python 包。
 
@@ -814,7 +877,7 @@ touch my_lab_devices/my_lab_devices/__init__.py
 touch my_lab_devices/my_lab_devices/devices/__init__.py
 ```
 
-#### 9.3 创建 setup.py
+#### 9.4 创建 setup.py
 
 ```python
 # my_lab_devices/setup.py
@@ -845,7 +908,7 @@ setup(
 )
 ```
 
-#### 9.4 开发安装
+#### 9.5 开发安装
 
 使用 `-e` 参数进行可编辑安装,这样代码修改后立即生效:
 
@@ -860,7 +923,7 @@ pip install -e . -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
 - 方便调试和测试
 - 支持版本控制(git)
 
-#### 9.5 编写设备驱动
+#### 9.6 编写设备驱动
 
 创建设备驱动文件:
 
@@ -1001,7 +1064,7 @@ class MyPump:
 - **返回 Dict**:所有动作方法返回字典类型
 - **文档字符串**:详细说明参数和功能
 
-#### 9.6 测试设备驱动
+#### 9.7 测试设备驱动
 
 创建简单的测试脚本:
 
````
docs/user_guide/image/add_protocol.png (BIN, new file): binary file not shown. After: 81 KiB.

(BIN, modified image): binary file not shown. Before: 275 KiB, After: 415 KiB.
@@ -13,15 +13,26 @@
|
|||||||
- 开发者需要 Git 和基本的 Python 开发知识
|
- 开发者需要 Git 和基本的 Python 开发知识
|
||||||
- 自定义 msgs 需要 GitHub 账号
|
- 自定义 msgs 需要 GitHub 账号
|
||||||
|
|
||||||
|
## 安装包选择
|
||||||
|
|
||||||
|
Uni-Lab-OS 提供三个安装包版本,根据您的需求选择:
|
||||||
|
|
||||||
|
| 安装包 | 适用场景 | 包含组件 | 磁盘占用 |
|
||||||
|
|--------|----------|----------|----------|
|
||||||
|
| **unilabos** | **推荐大多数用户**,生产部署 | 完整安装包,开箱即用 | ~2-3 GB |
|
||||||
|
| **unilabos-env** | 开发者环境(可编辑安装) | 仅环境依赖,通过 pip 安装 unilabos | ~2 GB |
|
||||||
|
| **unilabos-full** | 仿真可视化、完整功能体验 | unilabos + 完整 ROS2 桌面版 + Gazebo + MoveIt | ~8-10 GB |
|
||||||
|
|
||||||
## 安装方式选择
|
## 安装方式选择
|
||||||
|
|
||||||
根据您的使用场景,选择合适的安装方式:
|
根据您的使用场景,选择合适的安装方式:
|
||||||
|
|
||||||
| 安装方式 | 适用人群 | 特点 | 安装时间 |
|
| 安装方式 | 适用人群 | 推荐安装包 | 特点 | 安装时间 |
|
||||||
| ---------------------- | -------------------- | ------------------------------ | ---------------------------- |
|
| ---------------------- | -------------------- | ----------------- | ------------------------------ | ---------------------------- |
|
||||||
| **方式一:一键安装** | 实验室用户、快速体验 | 预打包环境,离线可用,无需配置 | 5-10 分钟 (网络良好的情况下) |
|
| **方式一:一键安装** | 快速体验、演示 | 预打包环境 | 离线可用,无需配置 | 5-10 分钟 (网络良好的情况下) |
|
||||||
| **方式二:手动安装** | 标准用户、生产环境 | 灵活配置,版本可控 | 10-20 分钟 |
|
| **方式二:手动安装** | **大多数用户** | `unilabos` | 完整功能,开箱即用 | 10-20 分钟 |
|
||||||
| **方式三:开发者安装** | 开发者、需要修改源码 | 可编辑模式,支持自定义 msgs | 20-30 分钟 |
|
| **方式三:开发者安装** | 开发者、需要修改源码 | `unilabos-env` | 可编辑模式,支持自定义开发 | 20-30 分钟 |
|
||||||
|
| **仿真/可视化** | 仿真测试、可视化调试 | `unilabos-full` | 含 Gazebo、rviz2、MoveIt | 30-60 分钟 |
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -144,17 +155,38 @@ bash Miniforge3-$(uname)-$(uname -m).sh
|
|||||||
使用以下命令创建 Uni-Lab 专用环境:
|
使用以下命令创建 Uni-Lab 专用环境:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
mamba create -n unilab python=3.11.11 # 目前ros2组件依赖版本大多为3.11.11
|
mamba create -n unilab python=3.11.14 # 目前ros2组件依赖版本大多为3.11.14
|
||||||
mamba activate unilab
|
mamba activate unilab
|
||||||
mamba install -n unilab uni-lab::unilabos -c robostack-staging -c conda-forge
|
|
||||||
|
# 选择安装包(三选一):
|
||||||
|
|
||||||
|
# 方案 A:标准安装(推荐大多数用户)
|
||||||
|
mamba install uni-lab::unilabos -c robostack-staging -c conda-forge
|
||||||
|
|
||||||
|
# 方案 B:开发者环境(可编辑模式开发)
|
||||||
|
mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge
|
||||||
|
# 然后安装 unilabos 和 pip 依赖:
|
||||||
|
git clone https://github.com/deepmodeling/Uni-Lab-OS.git && cd Uni-Lab-OS
|
||||||
|
pip install -e .
|
||||||
|
uv pip install -r unilabos/utils/requirements.txt
|
||||||
|
|
||||||
|
# 方案 C:完整版(含仿真和可视化工具)
|
||||||
|
mamba install uni-lab::unilabos-full -c robostack-staging -c conda-forge
|
||||||
```
|
```
|
||||||
|
|
||||||
**参数说明**:
|
**参数说明**:
|
||||||
|
|
||||||
- `-n unilab`: 创建名为 "unilab" 的环境
|
- `-n unilab`: 创建名为 "unilab" 的环境
|
||||||
- `uni-lab::unilabos`: 从 uni-lab channel 安装 unilabos 包
|
- `uni-lab::unilabos`: 安装 unilabos 完整包,开箱即用(推荐)
|
||||||
|
- `uni-lab::unilabos-env`: 仅安装环境依赖,适合开发者使用 `pip install -e .`
|
||||||
|
- `uni-lab::unilabos-full`: 安装完整包(含 ROS2 Desktop、Gazebo、MoveIt 等)
|
||||||
- `-c robostack-staging -c conda-forge`: 添加额外的软件源
|
- `-c robostack-staging -c conda-forge`: 添加额外的软件源
|
||||||
|
|
||||||
|
**包选择建议**:
|
||||||
|
- **日常使用/生产部署**:安装 `unilabos`(推荐,完整功能,开箱即用)
|
||||||
|
- **开发者**:安装 `unilabos-env`,然后使用 `uv pip install -r unilabos/utils/requirements.txt` 安装依赖,再 `pip install -e .` 进行可编辑安装
|
||||||
|
- **仿真/可视化**:安装 `unilabos-full`(Gazebo、rviz2、MoveIt)
|
||||||
|
|
||||||
**如果遇到网络问题**,可以使用清华镜像源加速下载:
|
**如果遇到网络问题**,可以使用清华镜像源加速下载:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@@ -163,8 +195,14 @@ mamba config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/m
|
|||||||
mamba config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/
|
mamba config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/
|
||||||
mamba config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge/
|
mamba config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge/
|
||||||
|
|
||||||
# 然后重新执行安装命令
|
# 然后重新执行安装命令(推荐标准安装)
|
||||||
mamba create -n unilab uni-lab::unilabos -c robostack-staging
|
mamba create -n unilab uni-lab::unilabos -c robostack-staging
|
||||||
|
|
||||||
|
# 或完整版(仿真/可视化)
|
||||||
|
mamba create -n unilab uni-lab::unilabos-full -c robostack-staging
|
||||||
|
|
||||||
|
# pip 安装时使用清华镜像(开发者安装时使用)
|
||||||
|
uv pip install -r unilabos/utils/requirements.txt -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
|
||||||
```
|
```
|
||||||
|
|
||||||
### 第三步:激活环境
|
### 第三步:激活环境
|
||||||
@@ -203,58 +241,87 @@ cd Uni-Lab-OS
|
|||||||
cd Uni-Lab-OS
|
cd Uni-Lab-OS
|
||||||
```
|
```
|
||||||
|
|
||||||
### 第二步:安装基础环境
|
### 第二步:安装开发环境(unilabos-env)
|
||||||
|
|
||||||
**推荐方式**:先通过**方式一(一键安装)**或**方式二(手动安装)**完成基础环境的安装,这将包含所有必需的依赖项(ROS2、msgs 等)。
|
**重要**:开发者请使用 `unilabos-env` 包,它专为开发者设计:
|
||||||
|
- 包含 ROS2 核心组件和消息包(ros-humble-ros-core、std-msgs、geometry-msgs 等)
|
||||||
#### 选项 A:通过一键安装(推荐)
|
- 包含 transforms3d、cv-bridge、tf2 等 conda 依赖
|
||||||
|
- 包含 `uv` 工具,用于快速安装 pip 依赖
|
||||||
参考上文"方式一:一键安装",完成基础环境的安装后,激活环境:
|
- **不包含** pip 依赖和 unilabos 包(由 `pip install -e .` 和 `uv pip install` 安装)
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
# 创建并激活环境
|
||||||
|
mamba create -n unilab python=3.11.14
|
||||||
conda activate unilab
|
conda activate unilab
|
||||||
|
|
||||||
|
# 安装开发者环境包(ROS2 + conda 依赖 + uv)
|
||||||
|
mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge
|
||||||
```
|
```
|
||||||
|
|
||||||
#### 选项 B:通过手动安装
|
### 第三步:安装 pip 依赖和可编辑模式安装
|
||||||
|
|
||||||
参考上文"方式二:手动安装",创建并安装环境:
|
克隆代码并安装依赖:
|
||||||
|
|
||||||
```bash
|
|
||||||
mamba create -n unilab python=3.11.11
|
|
||||||
conda activate unilab
|
|
||||||
mamba install -n unilab uni-lab::unilabos -c robostack-staging -c conda-forge
|
|
||||||
```
|
|
||||||
|
|
||||||
**说明**:这会安装包括 Python 3.11.11、ROS2 Humble、ros-humble-unilabos-msgs 和所有必需依赖
|
|
||||||
|
|
||||||
### 第三步:切换到开发版本
|
|
||||||
|
|
||||||
现在你已经有了一个完整可用的 Uni-Lab 环境,接下来将 unilabos 包切换为开发版本:
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# 确保环境已激活
|
# 确保环境已激活
|
||||||
conda activate unilab
|
conda activate unilab
|
||||||
|
|
||||||
# 卸载 pip 安装的 unilabos(保留所有 conda 依赖)
|
# 克隆仓库(如果还未克隆)
|
||||||
pip uninstall unilabos -y
|
git clone https://github.com/deepmodeling/Uni-Lab-OS.git
|
||||||
|
|
||||||
# 克隆 dev 分支(如果还未克隆)
|
|
||||||
cd /path/to/your/workspace
|
|
||||||
git clone -b dev https://github.com/deepmodeling/Uni-Lab-OS.git
|
|
||||||
# 或者如果已经克隆,切换到 dev 分支
|
|
||||||
cd Uni-Lab-OS
|
cd Uni-Lab-OS
|
||||||
|
|
||||||
|
# 切换到 dev 分支(可选)
|
||||||
git checkout dev
|
git checkout dev
|
||||||
git pull
|
git pull
|
||||||
|
|
||||||
# 以可编辑模式安装开发版 unilabos
|
|
||||||
pip install -e . -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
|
|
||||||
```
|
```
|
||||||
|
|
||||||
**参数说明**:
|
**推荐:使用安装脚本**(自动检测中文环境,使用 uv 加速):
|
||||||
|
|
||||||
- `-e`: editable mode(可编辑模式),代码修改立即生效,无需重新安装
|
```bash
|
||||||
- `-i`: 使用清华镜像源加速下载
|
# 自动检测中文环境,如果是中文系统则使用清华镜像
|
||||||
- `pip uninstall unilabos`: 只卸载 pip 安装的 unilabos 包,不影响 conda 安装的其他依赖(如 ROS2、msgs 等)
|
python scripts/dev_install.py
|
||||||
|
|
||||||
|
# 或者手动指定:
|
||||||
|
python scripts/dev_install.py --china # 强制使用清华镜像
|
||||||
|
python scripts/dev_install.py --no-mirror # 强制使用 PyPI
|
||||||
|
python scripts/dev_install.py --skip-deps # 跳过 pip 依赖安装
|
||||||
|
python scripts/dev_install.py --use-pip # 使用 pip 而非 uv
|
||||||
|
```
|
||||||
|
|
||||||
|
**手动安装**(如果脚本安装失败或速度太慢):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. 安装 unilabos(可编辑模式)
|
||||||
|
pip install -e .
|
||||||
|
|
||||||
|
# 2. 使用 uv 安装 pip 依赖(推荐,速度更快)
|
||||||
|
uv pip install -r unilabos/utils/requirements.txt
|
||||||
|
|
||||||
|
# 国内用户使用清华镜像:
|
||||||
|
pip install -e . -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
|
||||||
|
uv pip install -r unilabos/utils/requirements.txt -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
|
||||||
|
```
|
||||||
|
|
||||||
|
**注意**:
|
||||||
|
- `uv` 已包含在 `unilabos-env` 中,无需单独安装
|
||||||
|
- `unilabos/utils/requirements.txt` 包含运行 unilabos 所需的所有 pip 依赖
|
||||||
|
- 部分特殊包(如 pylabrobot)会在运行时由 unilabos 自动检测并安装
|
||||||
|
|
||||||
|
**为什么使用可编辑模式?**
|
||||||
|
|
||||||
|
- `-e` (editable mode):代码修改**立即生效**,无需重新安装
|
||||||
|
- 适合开发调试:修改代码后直接运行测试
|
||||||
|
- 与 `unilabos-env` 配合:环境依赖由 conda 管理,unilabos 代码由 pip 管理
|
||||||
|
|
||||||
|
**验证安装**:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 检查 unilabos 版本
|
||||||
|
python -c "import unilabos; print(unilabos.__version__)"
|
||||||
|
|
||||||
|
# 检查安装位置(应该指向你的代码目录)
|
||||||
|
pip show unilabos | grep Location
|
||||||
|
```
|
||||||
|
|
||||||
### 第四步:安装或自定义 ros-humble-unilabos-msgs(可选)
|
### 第四步:安装或自定义 ros-humble-unilabos-msgs(可选)
|
||||||
|
|
||||||
@@ -464,7 +531,45 @@ cd $CONDA_PREFIX/envs/unilab

### Question 8: The environment is large. Is there a way to make it smaller?

**Solution**:

1. **Use the standard `unilabos` package** (recommended for most users):
   ```bash
   mamba install uni-lab::unilabos -c robostack-staging -c conda-forge
   ```
   The standard package provides full functionality at roughly 2-3 GB (versus 8-10 GB for the full package).

2. **Use the `unilabos-env` developer package** (minimal):
   ```bash
   mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge
   # Then install the dependencies manually
   pip install -e .
   uv pip install -r unilabos/utils/requirements.txt
   ```
   The developer package contains only the environment dependencies, about 2 GB.

3. **Install extra components on demand**:
   If you need a specific feature later, install it separately:
   ```bash
   # Jupyter
   mamba install jupyter jupyros

   # Visualization
   mamba install matplotlib opencv

   # Simulation (note: this pulls in a large number of dependencies)
   mamba install ros-humble-gazebo-ros
   ```

4. **About the pre-packaged environment**:
   The pre-packaged environment (method 1) bundles every dependency, so it is typically large (2-5 GB compressed). This is intentional: it guarantees offline installation and full functionality.

**Package selection guide**:

| Use case | Recommended package | Approximate size |
|------|--------|----------|
| Daily use / production deployment | `unilabos` | ~2-3 GB |
| Development and debugging (editable mode) | `unilabos-env` | ~2 GB |
| Simulation / visualization | `unilabos-full` | ~8-10 GB |
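If you want to see where the disk space actually goes before choosing a package, a small sketch like the one below reports the on-disk size of the active conda environment. It only assumes that `CONDA_PREFIX` is set, as it is inside an activated environment.

```python
# Minimal sketch: report the on-disk size of the active conda environment.
# Assumes CONDA_PREFIX is set (i.e. the environment is activated).
import os

prefix = os.environ.get("CONDA_PREFIX")
if prefix is None:
    raise SystemExit("No active conda environment (CONDA_PREFIX is not set)")

total = 0
for root, _dirs, files in os.walk(prefix):
    for name in files:
        path = os.path.join(root, name)
        if os.path.isfile(path):
            total += os.path.getsize(path)

print(f"{prefix}: {total / 1024 ** 3:.2f} GiB")
```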
### Question 9: How do I update to the latest version?

@@ -511,6 +616,7 @@ mamba update ros-humble-unilabos-msgs -c uni-lab -c robostack-staging -c conda-f

**Tips**:

- **Most users**: method 2 (manual installation) with the standard `unilabos` package
- **Developers**: method 3 (developer installation); after installing `unilabos-env`, install the dependencies with `uv pip install -r unilabos/utils/requirements.txt`
- **Simulation / visualization**: install the full `unilabos-full` package
- **Quick trials and demos**: method 1 (one-click installation)
@@ -1,6 +1,6 @@

package:
  name: ros-humble-unilabos-msgs
  version: 0.10.17

source:
  path: ../../unilabos_msgs
  target_directory: src

@@ -25,7 +25,7 @@ requirements:

  build:
    - ${{ compiler('cxx') }}
    - ${{ compiler('c') }}
    - python ==3.11.14
    - numpy
    - if: build_platform != target_platform
      then:

@@ -63,14 +63,14 @@ requirements:

    - robostack-staging::ros-humble-rosidl-default-generators
    - robostack-staging::ros-humble-std-msgs
    - robostack-staging::ros-humble-geometry-msgs
    - robostack-staging::ros2-distro-mutex=0.7
  run:
    - robostack-staging::ros-humble-action-msgs
    - robostack-staging::ros-humble-ros-workspace
    - robostack-staging::ros-humble-rosidl-default-runtime
    - robostack-staging::ros-humble-std-msgs
    - robostack-staging::ros-humble-geometry-msgs
    - robostack-staging::ros2-distro-mutex=0.7
    - if: osx and x86_64
      then:
        - __osx >=${{ MACOSX_DEPLOYMENT_TARGET|default('10.14') }}
@@ -1,6 +1,6 @@

package:
  name: unilabos
  version: "0.10.17"

source:
  path: ../..
@@ -85,7 +85,7 @@ Verification:

-------------

The verify_installation.py script will check:
- Python version (3.11.14)
- ROS2 rclpy installation
- UniLabOS installation and dependencies

@@ -104,7 +104,7 @@ Build Information:

Branch: {branch}
Platform: {platform}
Python: 3.11.14
Date: {build_date}

Troubleshooting:
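The contents of verify_installation.py itself are not part of this diff, so the following is only a minimal sketch, assuming it performs exactly the three checks listed above (interpreter version, rclpy import, unilabos import).

```python
# Minimal sketch of the checks described above; the real verify_installation.py
# may differ. Only the standard library plus rclpy/unilabos is assumed.
import sys

def verify() -> bool:
    ok = True
    if sys.version_info[:2] != (3, 11):
        print(f"Unexpected Python version: {sys.version.split()[0]}")
        ok = False
    for module in ("rclpy", "unilabos"):
        try:
            __import__(module)
            print(f"{module}: OK")
        except ImportError as exc:
            print(f"{module}: FAILED ({exc})")
            ok = False
    return ok

if __name__ == "__main__":
    sys.exit(0 if verify() else 1)
```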
scripts/dev_install.py (new file, 214 lines)
@@ -0,0 +1,214 @@

#!/usr/bin/env python3
"""
Development installation script for UniLabOS.
Auto-detects Chinese locale and uses appropriate mirror.

Usage:
    python scripts/dev_install.py
    python scripts/dev_install.py --no-mirror  # Force no mirror
    python scripts/dev_install.py --china      # Force China mirror
    python scripts/dev_install.py --skip-deps  # Skip pip dependencies installation

Flow:
    1. pip install -e . (install unilabos in editable mode)
    2. Detect Chinese locale
    3. Use uv to install pip dependencies from requirements.txt
    4. Special packages (like pylabrobot) are handled by environment_check.py at runtime
"""

import locale
import subprocess
import sys
import argparse
from pathlib import Path

# Tsinghua mirror URL
TSINGHUA_MIRROR = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple"


def is_chinese_locale() -> bool:
    """
    Detect if system is in Chinese locale.
    Same logic as EnvironmentChecker._is_chinese_locale()
    """
    try:
        lang = locale.getdefaultlocale()[0]
        if lang and ("zh" in lang.lower() or "chinese" in lang.lower()):
            return True
    except Exception:
        pass
    return False


def run_command(cmd: list, description: str, retry: int = 2) -> bool:
    """Run command with retry support."""
    print(f"[INFO] {description}")
    print(f"[CMD] {' '.join(cmd)}")

    for attempt in range(retry + 1):
        try:
            result = subprocess.run(cmd, check=True, timeout=600)
            print(f"[OK] {description}")
            return True
        except subprocess.CalledProcessError as e:
            if attempt < retry:
                print(f"[WARN] Attempt {attempt + 1} failed, retrying...")
            else:
                print(f"[ERROR] {description} failed: {e}")
                return False
        except subprocess.TimeoutExpired:
            print(f"[ERROR] {description} timed out")
            return False
    return False


def install_editable(project_root: Path, use_mirror: bool) -> bool:
    """Install unilabos in editable mode using pip."""
    cmd = [sys.executable, "-m", "pip", "install", "-e", str(project_root)]
    if use_mirror:
        cmd.extend(["-i", TSINGHUA_MIRROR])

    return run_command(cmd, "Installing unilabos in editable mode")


def install_requirements_uv(requirements_file: Path, use_mirror: bool) -> bool:
    """Install pip dependencies using uv (installed via conda-forge::uv)."""
    cmd = ["uv", "pip", "install", "-r", str(requirements_file)]
    if use_mirror:
        cmd.extend(["-i", TSINGHUA_MIRROR])

    return run_command(cmd, "Installing pip dependencies with uv", retry=2)


def install_requirements_pip(requirements_file: Path, use_mirror: bool) -> bool:
    """Fallback: Install pip dependencies using pip."""
    cmd = [sys.executable, "-m", "pip", "install", "-r", str(requirements_file)]
    if use_mirror:
        cmd.extend(["-i", TSINGHUA_MIRROR])

    return run_command(cmd, "Installing pip dependencies with pip", retry=2)


def check_uv_available() -> bool:
    """Check if uv is available (installed via conda-forge::uv)."""
    try:
        subprocess.run(["uv", "--version"], capture_output=True, check=True)
        return True
    except (subprocess.CalledProcessError, FileNotFoundError):
        return False


def main():
    parser = argparse.ArgumentParser(description="Development installation script for UniLabOS")
    parser.add_argument("--china", action="store_true", help="Force use China mirror (Tsinghua)")
    parser.add_argument("--no-mirror", action="store_true", help="Force use default PyPI (no mirror)")
    parser.add_argument(
        "--skip-deps", action="store_true", help="Skip pip dependencies installation (only install unilabos)"
    )
    parser.add_argument("--use-pip", action="store_true", help="Use pip instead of uv for dependencies")
    args = parser.parse_args()

    # Determine project root
    script_dir = Path(__file__).parent
    project_root = script_dir.parent
    requirements_file = project_root / "unilabos" / "utils" / "requirements.txt"

    if not (project_root / "setup.py").exists():
        print(f"[ERROR] setup.py not found in {project_root}")
        sys.exit(1)

    print("=" * 60)
    print("UniLabOS Development Installation")
    print("=" * 60)
    print(f"Project root: {project_root}")
    print()

    # Determine mirror usage based on locale
    if args.no_mirror:
        use_mirror = False
        print("[INFO] Mirror disabled by --no-mirror flag")
    elif args.china:
        use_mirror = True
        print("[INFO] China mirror enabled by --china flag")
    else:
        use_mirror = is_chinese_locale()
        if use_mirror:
            print("[INFO] Chinese locale detected, using Tsinghua mirror")
        else:
            print("[INFO] Non-Chinese locale detected, using default PyPI")

    print()

    # Step 1: Install unilabos in editable mode
    print("[STEP 1] Installing unilabos in editable mode...")
    if not install_editable(project_root, use_mirror):
        print("[ERROR] Failed to install unilabos")
        print()
        print("Manual fallback:")
        if use_mirror:
            print(f"  pip install -e {project_root} -i {TSINGHUA_MIRROR}")
        else:
            print(f"  pip install -e {project_root}")
        sys.exit(1)

    print()

    # Step 2: Install pip dependencies
    if args.skip_deps:
        print("[INFO] Skipping pip dependencies installation (--skip-deps)")
    else:
        print("[STEP 2] Installing pip dependencies...")

        if not requirements_file.exists():
            print(f"[WARN] Requirements file not found: {requirements_file}")
            print("[INFO] Skipping dependencies installation")
        else:
            # Try uv first (faster), fallback to pip
            if args.use_pip:
                print("[INFO] Using pip (--use-pip flag)")
                success = install_requirements_pip(requirements_file, use_mirror)
            elif check_uv_available():
                print("[INFO] Using uv (installed via conda-forge::uv)")
                success = install_requirements_uv(requirements_file, use_mirror)
                if not success:
                    print("[WARN] uv failed, falling back to pip...")
                    success = install_requirements_pip(requirements_file, use_mirror)
            else:
                print("[WARN] uv not available (should be installed via: mamba install conda-forge::uv)")
                print("[INFO] Falling back to pip...")
                success = install_requirements_pip(requirements_file, use_mirror)

            if not success:
                print()
                print("[WARN] Failed to install some dependencies automatically.")
                print("You can manually install them:")
                if use_mirror:
                    print(f"  uv pip install -r {requirements_file} -i {TSINGHUA_MIRROR}")
                    print("  or:")
                    print(f"  pip install -r {requirements_file} -i {TSINGHUA_MIRROR}")
                else:
                    print(f"  uv pip install -r {requirements_file}")
                    print("  or:")
                    print(f"  pip install -r {requirements_file}")

    print()
    print("=" * 60)
    print("Installation complete!")
    print("=" * 60)
    print()
    print("Note: Some special packages (like pylabrobot) are installed")
    print("automatically at runtime by unilabos if needed.")
    print()
    print("Verify installation:")
    print('  python -c "import unilabos; print(unilabos.__version__)"')
    print()
    print("If you encounter issues, you can manually install dependencies:")
    if use_mirror:
        print(f"  uv pip install -r unilabos/utils/requirements.txt -i {TSINGHUA_MIRROR}")
    else:
        print("  uv pip install -r unilabos/utils/requirements.txt")
    print()


if __name__ == "__main__":
    main()
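One caveat worth noting: `locale.getdefaultlocale()` is deprecated as of Python 3.11 and emits a DeprecationWarning, so the locale probe above may eventually need an alternative. A minimal sketch of an equivalent check, assuming the same "zh"/"chinese" substring heuristic is acceptable and falling back to the `LANG` environment variable, could look like this:

```python
# Sketch of a deprecation-safe locale probe; the substring heuristic matches
# dev_install.py, while the fallback to LANG is an assumption.
import locale
import os

def is_chinese_locale() -> bool:
    lang = locale.getlocale()[0] or os.environ.get("LANG", "")
    lang = lang.lower()
    return "zh" in lang or "chinese" in lang
```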
setup.py
@@ -4,7 +4,7 @@ package_name = 'unilabos'

setup(
    name=package_name,
    version='0.10.17',
    packages=find_packages(),
    include_package_data=True,
    install_requires=['setuptools'],
tests/workflow/test.json (new file, 213 lines)
@@ -0,0 +1,213 @@

{
  "workflow": [
    {"action": "transfer_liquid", "action_args": {"sources": "cell_lines",   "targets": "Liquid_1",   "asp_vol": 100.0, "dis_vol": 74.75, "asp_flow_rate": 94.0, "dis_flow_rate": 95.5}},
    {"action": "transfer_liquid", "action_args": {"sources": "cell_lines",   "targets": "Liquid_2",   "asp_vol": 100.0, "dis_vol": 74.75, "asp_flow_rate": 94.0, "dis_flow_rate": 95.5}},
    {"action": "transfer_liquid", "action_args": {"sources": "cell_lines",   "targets": "Liquid_3",   "asp_vol": 100.0, "dis_vol": 74.75, "asp_flow_rate": 94.0, "dis_flow_rate": 95.5}},
    {"action": "transfer_liquid", "action_args": {"sources": "cell_lines_2", "targets": "Liquid_4",   "asp_vol": 100.0, "dis_vol": 74.75, "asp_flow_rate": 94.0, "dis_flow_rate": 95.5}},
    {"action": "transfer_liquid", "action_args": {"sources": "cell_lines_2", "targets": "Liquid_5",   "asp_vol": 100.0, "dis_vol": 74.75, "asp_flow_rate": 94.0, "dis_flow_rate": 95.5}},
    {"action": "transfer_liquid", "action_args": {"sources": "cell_lines_2", "targets": "Liquid_6",   "asp_vol": 100.0, "dis_vol": 74.75, "asp_flow_rate": 94.0, "dis_flow_rate": 95.5}},
    {"action": "transfer_liquid", "action_args": {"sources": "cell_lines_3", "targets": "dest_set",   "asp_vol": 100.0, "dis_vol": 74.75, "asp_flow_rate": 94.0, "dis_flow_rate": 95.5}},
    {"action": "transfer_liquid", "action_args": {"sources": "cell_lines_3", "targets": "dest_set_2", "asp_vol": 100.0, "dis_vol": 74.75, "asp_flow_rate": 94.0, "dis_flow_rate": 95.5}},
    {"action": "transfer_liquid", "action_args": {"sources": "cell_lines_3", "targets": "dest_set_3", "asp_vol": 100.0, "dis_vol": 74.75, "asp_flow_rate": 94.0, "dis_flow_rate": 95.5}}
  ],
  "reagent": {
    "Liquid_1":     {"slot": 1, "well": ["A4", "A7", "A10"], "labware": "rep 1"},
    "Liquid_4":     {"slot": 1, "well": ["A4", "A7", "A10"], "labware": "rep 1"},
    "dest_set":     {"slot": 1, "well": ["A4", "A7", "A10"], "labware": "rep 1"},
    "Liquid_2":     {"slot": 2, "well": ["A3", "A5", "A8"],  "labware": "rep 2"},
    "Liquid_5":     {"slot": 2, "well": ["A3", "A5", "A8"],  "labware": "rep 2"},
    "dest_set_2":   {"slot": 2, "well": ["A3", "A5", "A8"],  "labware": "rep 2"},
    "Liquid_3":     {"slot": 3, "well": ["A4", "A6", "A10"], "labware": "rep 3"},
    "Liquid_6":     {"slot": 3, "well": ["A4", "A6", "A10"], "labware": "rep 3"},
    "dest_set_3":   {"slot": 3, "well": ["A4", "A6", "A10"], "labware": "rep 3"},
    "cell_lines":   {"slot": 4, "well": ["A1", "A3", "A5"],  "labware": "DRUG + YOYO-MEDIA"},
    "cell_lines_2": {"slot": 4, "well": ["A1", "A3", "A5"],  "labware": "DRUG + YOYO-MEDIA"},
    "cell_lines_3": {"slot": 4, "well": ["A1", "A3", "A5"],  "labware": "DRUG + YOYO-MEDIA"}
  }
}
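Since this file only declares data, a small sketch of how it might be consumed in a test can help: the check below loads the JSON and confirms that every source and target referenced by a workflow step is declared in the reagent section. This is illustrative only, not the project's actual test code; the path is the one added in this commit.

```python
# Minimal sketch: sanity-check that every workflow step refers to a declared reagent.
import json
from pathlib import Path

data = json.loads(Path("tests/workflow/test.json").read_text())
reagents = set(data["reagent"])

for i, step in enumerate(data["workflow"]):
    args = step["action_args"]
    for key in ("sources", "targets"):
        assert args[key] in reagents, f"step {i}: unknown reagent {args[key]!r}"

print(f"{len(data['workflow'])} steps reference {len(reagents)} declared reagents")
```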
@@ -1 +1 @@
__version__ = "0.10.17"
@@ -7,7 +7,6 @@ import sys

import threading
import time
from typing import Dict, Any, List

import networkx as nx
import yaml

@@ -17,9 +16,9 @@ unilabos_dir = os.path.dirname(os.path.dirname(current_dir))

if unilabos_dir not in sys.path:
    sys.path.append(unilabos_dir)

from unilabos.app.utils import cleanup_for_restart
from unilabos.utils.banner_print import print_status, print_unilab_banner
from unilabos.config.config import load_config, BasicConfig, HTTPConfig

# Global restart flags (used by ws_client and web/server)
_restart_requested: bool = False

@@ -161,6 +160,12 @@ def parse_args():

        default=False,
        help="Complete registry information",
    )
    parser.add_argument(
        "--check_mode",
        action="store_true",
        default=False,
        help="Run in check mode for CI: validates registry imports and ensures no file changes",
    )
    parser.add_argument(
        "--no_update_feedback",
        action="store_true",

@@ -211,7 +216,10 @@ def main():

    args_dict = vars(args)

    # Environment check - check and auto-install required packages (optional)
    skip_env_check = args_dict.get("skip_env_check", False)
    check_mode = args_dict.get("check_mode", False)

    if not skip_env_check:
        from unilabos.utils.environment_check import check_environment

        if not check_environment(auto_install=True):

@@ -222,7 +230,21 @@ def main():

    # Load the config file: prefer the config argument, then read from env
    config_path = args_dict.get("config")

    if check_mode:
        args_dict["working_dir"] = os.path.abspath(os.getcwd())
    # When skip_env_check is set, default to the current directory as working_dir
    if skip_env_check and not args_dict.get("working_dir") and not config_path:
        working_dir = os.path.abspath(os.getcwd())
        print_status(f"跳过环境检查模式:使用当前目录作为工作目录 {working_dir}", "info")
        # Check whether the current directory contains local_config.py
        local_config_in_cwd = os.path.join(working_dir, "local_config.py")
        if os.path.exists(local_config_in_cwd):
            config_path = local_config_in_cwd
            print_status(f"发现本地配置文件: {config_path}", "info")
        else:
            print_status(f"未指定config路径,可通过 --config 传入 local_config.py 文件路径", "info")
    elif os.getcwd().endswith("unilabos_data"):
        working_dir = os.path.abspath(os.getcwd())
    else:
        working_dir = os.path.abspath(os.path.join(os.getcwd(), "unilabos_data"))

@@ -241,7 +263,7 @@ def main():

        working_dir = os.path.dirname(config_path)
    elif os.path.exists(working_dir) and os.path.exists(os.path.join(working_dir, "local_config.py")):
        config_path = os.path.join(working_dir, "local_config.py")
    elif not skip_env_check and not config_path and (
        not os.path.exists(working_dir) or not os.path.exists(os.path.join(working_dir, "local_config.py"))
    ):
        print_status(f"未指定config路径,可通过 --config 传入 local_config.py 文件路径", "info")

@@ -255,9 +277,11 @@ def main():

            print_status(f"已创建 local_config.py 路径: {config_path}", "info")
        else:
            os._exit(1)

    # Load the config file (skipped in check_mode)
    print_status(f"当前工作目录为 {working_dir}", "info")
    if not check_mode:
        load_config_from_file(config_path)

    # Reconfigure the log level from the loaded config
    from unilabos.utils.log import configure_logger, logger

@@ -313,6 +337,7 @@ def main():

    machine_name = "".join([c if c.isalnum() or c == "_" else "_" for c in machine_name])
    BasicConfig.machine_name = machine_name
    BasicConfig.vis_2d_enable = args_dict["2d_vis"]
    BasicConfig.check_mode = check_mode

    from unilabos.resources.graphio import (
        read_node_link_json,

@@ -331,10 +356,14 @@ def main():

    # Show the startup banner
    print_unilab_banner(args_dict)

    # Registry - check_mode forces complete_registry on
    complete_registry = args_dict.get("complete_registry", False) or check_mode
    lab_registry = build_registry(args_dict["registry_path"], complete_registry, BasicConfig.upload_registry)

    # Check mode: exit once complete_registry has finished; git diff detection is handled by the CI workflow
    if check_mode:
        print_status("Check mode: complete_registry 完成,退出", "info")
        os._exit(0)

    if BasicConfig.upload_registry:
        # Register devices with the server - requires ak and sk
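The comment above notes that the actual git diff detection lives in the CI workflow, which is not part of this diff. Purely as an illustration of the idea, a CI check step could look roughly like the sketch below; the `unilab` entry point and the `--check_mode` flag come from this repository, while the rest (running from a clean checkout, treating any diff as failure) is an assumption.

```python
# Illustrative only: run check mode, then fail if registry files changed.
import subprocess
import sys

subprocess.run(["unilab", "--check_mode"], check=True)
diff = subprocess.run(["git", "diff", "--stat"], capture_output=True, text=True, check=True)
if diff.stdout.strip():
    print("Registry files changed after check mode:\n" + diff.stdout)
    sys.exit(1)
print("Registry is up to date")
```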
@@ -54,6 +54,7 @@ class JobAddReq(BaseModel):

    action_type: str = Field(
        examples=["unilabos_msgs.action._str_single_input.StrSingleInput"], description="action type", default=""
    )
    sample_material: dict = Field(examples=[{"string": "string"}], description="sample uuid to material uuid")
    action_args: dict = Field(examples=[{"string": "string"}], description="action arguments", default_factory=dict)
    task_id: str = Field(examples=["task_id"], description="task uuid (auto-generated if empty)", default="")
    job_id: str = Field(examples=["job_id"], description="goal uuid (auto-generated if empty)", default="")
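For context on the new field: a pydantic dict field declared without a default is required, which is why the websocket handler later injects an empty dict before building the request. The self-contained sketch below demonstrates that pattern with a hypothetical `DemoReq` model, not the project's own class.

```python
# Self-contained sketch of the pattern used above: a pydantic dict field
# declared without a default is required, unlike the default_factory one.
from pydantic import BaseModel, Field

class DemoReq(BaseModel):
    sample_material: dict = Field(description="sample uuid to material uuid")
    action_args: dict = Field(default_factory=dict)

print(DemoReq(sample_material={"sample-uuid": "material-uuid"}))
```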
@@ -4,8 +4,40 @@ UniLabOS 应用工具函数

提供清理、重启等工具函数
"""

import glob
import os
import shutil
import sys


def patch_rclpy_dll_windows():
    """在 Windows + conda 环境下为 rclpy 打 DLL 加载补丁"""
    if sys.platform != "win32" or not os.environ.get("CONDA_PREFIX"):
        return
    try:
        import rclpy

        return
    except ImportError as e:
        if not str(e).startswith("DLL load failed"):
            return
    cp = os.environ["CONDA_PREFIX"]
    impl = os.path.join(cp, "Lib", "site-packages", "rclpy", "impl", "implementation_singleton.py")
    pyd = glob.glob(os.path.join(cp, "Lib", "site-packages", "rclpy", "_rclpy_pybind11*.pyd"))
    if not os.path.exists(impl) or not pyd:
        return
    with open(impl, "r", encoding="utf-8") as f:
        content = f.read()
    lib_bin = os.path.join(cp, "Library", "bin").replace("\\", "/")
    patch = f'# UniLabOS DLL Patch\nimport os,ctypes\nos.add_dll_directory("{lib_bin}") if hasattr(os,"add_dll_directory") else None\ntry: ctypes.CDLL("{pyd[0].replace(chr(92),"/")}")\nexcept: pass\n# End Patch\n'
    shutil.copy2(impl, impl + ".bak")
    with open(impl, "w", encoding="utf-8") as f:
        f.write(patch + content)


patch_rclpy_dll_windows()

import gc
import threading
import time
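Because the function rewrites a file inside the installed rclpy package, it first keeps a `.bak` copy next to it. If the patch ever needs to be undone by hand, a small sketch like the following would restore the original; it only assumes the `.bak` file created above still exists and that `CONDA_PREFIX` points at the same environment.

```python
# Sketch: restore rclpy's implementation_singleton.py from the backup that
# patch_rclpy_dll_windows() creates. Assumes the .bak file is still present.
import os
import shutil

cp = os.environ["CONDA_PREFIX"]
impl = os.path.join(cp, "Lib", "site-packages", "rclpy", "impl", "implementation_singleton.py")
backup = impl + ".bak"
if os.path.exists(backup):
    shutil.copy2(backup, impl)
    print("restored", impl)
else:
    print("no backup found:", backup)
```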
@@ -359,9 +359,7 @@ class HTTPClient:

        Returns:
            Dict: API响应数据,包含 code 和 data (uuid, name)
        """
        payload = {
            "name": name,
            "data": {
                "workflow_uuid": workflow_uuid,
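After this change the hard-coded `target_lab_uuid` is no longer sent; the lab is resolved on the server side from the ak/sk credentials instead. For clarity, the request body now has roughly the shape sketched below, where the values are placeholders and only the field names visible in this hunk are used.

```python
# Shape of the payload after the change; values are placeholders, and only
# fields visible in this diff are shown.
payload = {
    "name": "my-workflow",
    "data": {
        "workflow_uuid": "<workflow uuid>",
        # ...additional fields not shown in this hunk
    },
}
```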
@@ -327,6 +327,7 @@ def job_add(req: JobAddReq) -> JobData:

        queue_item,
        action_type=action_type,
        action_kwargs=action_args,
        sample_material=req.sample_material,
        server_info=server_info,
    )
@@ -540,7 +540,7 @@ class MessageProcessor:

        try:
            message_str = json.dumps(msg, ensure_ascii=False)
            await self.websocket.send(message_str)
            # logger.trace(f"[MessageProcessor] Message sent: {msg.get('action', 'unknown')}")  # type: ignore # noqa: E501
        except Exception as e:
            logger.error(f"[MessageProcessor] Failed to send message: {str(e)}")
            logger.error(traceback.format_exc())

@@ -652,6 +652,8 @@ class MessageProcessor:

    async def _handle_job_start(self, data: Dict[str, Any]):
        """处理job_start消息"""
        try:
            if not data.get("sample_material"):
                data["sample_material"] = {}
            req = JobAddReq(**data)

            job_log = format_job_log(req.job_id, req.task_id, req.device_id, req.action)

@@ -683,6 +685,7 @@ class MessageProcessor:

                queue_item,
                action_type=req.action_type,
                action_kwargs=req.action_args,
                sample_material=req.sample_material,
                server_info=req.server_info,
            )

@@ -1294,7 +1297,7 @@ class WebSocketClient(BaseCommunicationClient):

            },
        }
        self.message_processor.send_message(message)
        # logger.trace(f"[WebSocketClient] Device status published: {device_id}.{property_name}")

    def publish_job_status(
        self, feedback_data: dict, item: QueueItem, status: str, return_info: Optional[dict] = None
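The two per-message trace calls above are simply commented out, presumably because they are very chatty. An alternative pattern, shown only as a sketch and not as the project's approach, is to gate such logs behind a cheap flag so they can be re-enabled without editing code; the environment variable name here is hypothetical and `logger` stands in for the project's logger.

```python
# Hypothetical pattern: keep high-volume trace logs but gate them on a flag,
# so they can be switched on for debugging without uncommenting code.
import os

TRACE_MESSAGES = os.environ.get("UNILABOS_TRACE_MESSAGES") == "1"

def trace_message(logger, msg: dict) -> None:
    if TRACE_MESSAGES:
        logger.trace(f"[MessageProcessor] Message sent: {msg.get('action', 'unknown')}")
```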
@@ -95,8 +95,29 @@ def get_vessel_liquid_volume(G: nx.DiGraph, vessel: str) -> float:

    return total_volume


def is_integrated_pump(node_class: str, node_name: str = "") -> bool:
    """
    判断是否为泵阀一体设备
    """
    class_lower = (node_class or "").lower()
    name_lower = (node_name or "").lower()

    if "pump" not in class_lower and "pump" not in name_lower:
        return False

    integrated_markers = [
        "valve",
        "pump_valve",
        "pumpvalve",
        "integrated",
        "transfer_pump",
    ]

    for marker in integrated_markers:
        if marker in class_lower or marker in name_lower:
            return True

    return False


def find_connected_pump(G, valve_node):

@@ -186,7 +207,9 @@ def build_pump_valve_maps(G, pump_backbone):

    debug_print(f"🔧 过滤后的骨架: {filtered_backbone}")

    for node in filtered_backbone:
        node_data = G.nodes.get(node, {})
        node_class = node_data.get("class", "") or ""
        if is_integrated_pump(node_class, node):
            pumps_from_node[node] = node
            valve_from_node[node] = node
            debug_print(f" - 集成泵-阀: {node}")
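To make the broadened matching of the rewritten `is_integrated_pump` concrete, a few illustrative calls are shown below; the class and node names are made up for the example and are not taken from any real registry entry.

```python
# Illustrative calls; the class/name strings are hypothetical examples.
print(is_integrated_pump("RunzeSyringePumpValve"))            # True: "pump" plus "valve" in the class
print(is_integrated_pump("TransferPump", "transfer_pump_1"))  # True: "transfer_pump" marker in the name
print(is_integrated_pump("SyringePump", "pump_1"))            # False: a pump, but no integrated marker
print(is_integrated_pump("RotaryValve", "valve_1"))           # False: no "pump" at all
```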
unilabos/devices/Qone_nmr/__init__.py (new file, empty)

(File diff suppressed because it is too large.)
@@ -30,12 +30,36 @@ from pylabrobot.liquid_handling.standard import (
|
|||||||
ResourceMove,
|
ResourceMove,
|
||||||
ResourceDrop,
|
ResourceDrop,
|
||||||
)
|
)
|
||||||
from pylabrobot.resources import ResourceHolder, ResourceStack, Tip, Deck, Plate, Well, TipRack, Resource, Container, Coordinate, TipSpot, Trash, PlateAdapter, TubeRack, create_homogeneous_resources, create_ordered_items_2d
|
from pylabrobot.resources import (
|
||||||
|
ResourceHolder,
|
||||||
|
ResourceStack,
|
||||||
|
Tip,
|
||||||
|
Deck,
|
||||||
|
Plate,
|
||||||
|
Well,
|
||||||
|
TipRack,
|
||||||
|
Resource,
|
||||||
|
Container,
|
||||||
|
Coordinate,
|
||||||
|
TipSpot,
|
||||||
|
Trash,
|
||||||
|
PlateAdapter,
|
||||||
|
TubeRack,
|
||||||
|
create_homogeneous_resources,
|
||||||
|
)
|
||||||
|
|
||||||
from unilabos.devices.liquid_handling.liquid_handler_abstract import LiquidHandlerAbstract, SimpleReturn
|
from unilabos.devices.liquid_handling.liquid_handler_abstract import (
|
||||||
|
LiquidHandlerAbstract,
|
||||||
|
SimpleReturn,
|
||||||
|
SetLiquidReturn,
|
||||||
|
SetLiquidFromPlateReturn,
|
||||||
|
TransferLiquidReturn,
|
||||||
|
)
|
||||||
|
from unilabos.registry.placeholder_type import ResourceSlot
|
||||||
from unilabos.resources.itemized_carrier import ItemizedCarrier
|
from unilabos.resources.itemized_carrier import ItemizedCarrier
|
||||||
from unilabos.ros.nodes.base_device_node import BaseROS2DeviceNode, ROS2DeviceNode
|
from unilabos.ros.nodes.base_device_node import BaseROS2DeviceNode, ROS2DeviceNode
|
||||||
|
|
||||||
|
|
||||||
class PRCXIError(RuntimeError):
|
class PRCXIError(RuntimeError):
|
||||||
"""Lilith 返回 Success=false 时抛出的业务异常"""
|
"""Lilith 返回 Success=false 时抛出的业务异常"""
|
||||||
|
|
||||||
@@ -83,6 +107,7 @@ class PRCXI9300Deck(Deck):
|
|||||||
self.slots[slot - 1] = resource
|
self.slots[slot - 1] = resource
|
||||||
super().assign_child_resource(resource, location=self.slot_locations[slot - 1])
|
super().assign_child_resource(resource, location=self.slot_locations[slot - 1])
|
||||||
|
|
||||||
|
|
||||||
class PRCXI9300Container(Plate):
|
class PRCXI9300Container(Plate):
|
||||||
"""PRCXI 9300 的专用 Container 类,继承自 Plate,用于槽位定位和未知模块。
|
"""PRCXI 9300 的专用 Container 类,继承自 Plate,用于槽位定位和未知模块。
|
||||||
|
|
||||||
@@ -111,33 +136,49 @@ class PRCXI9300Container(Plate):
|
|||||||
def serialize_state(self) -> Dict[str, Dict[str, Any]]:
|
def serialize_state(self) -> Dict[str, Dict[str, Any]]:
|
||||||
data = super().serialize_state()
|
data = super().serialize_state()
|
||||||
data.update(self._unilabos_state)
|
data.update(self._unilabos_state)
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
class PRCXI9300Plate(Plate):
|
class PRCXI9300Plate(Plate):
|
||||||
"""
|
"""
|
||||||
专用孔板类:
|
专用孔板类:
|
||||||
1. 继承自 PLR 原生 Plate,保留所有物理特性。
|
1. 继承自 PLR 原生 Plate,保留所有物理特性。
|
||||||
2. 增加 material_info 参数,用于在初始化时直接绑定 Unilab UUID。
|
2. 增加 material_info 参数,用于在初始化时直接绑定 Unilab UUID。
|
||||||
"""
|
"""
|
||||||
def __init__(self, name: str, size_x: float, size_y: float, size_z: float,
|
|
||||||
category: str = "plate",
|
def __init__(
|
||||||
ordered_items: collections.OrderedDict = None,
|
self,
|
||||||
ordering: Optional[collections.OrderedDict] = None,
|
name: str,
|
||||||
model: Optional[str] = None,
|
size_x: float,
|
||||||
material_info: Optional[Dict[str, Any]] = None,
|
size_y: float,
|
||||||
**kwargs):
|
size_z: float,
|
||||||
|
category: str = "plate",
|
||||||
|
ordered_items: collections.OrderedDict = None,
|
||||||
|
ordering: Optional[collections.OrderedDict] = None,
|
||||||
|
model: Optional[str] = None,
|
||||||
|
material_info: Optional[Dict[str, Any]] = None,
|
||||||
|
**kwargs,
|
||||||
|
):
|
||||||
# 如果 ordered_items 不为 None,直接使用
|
# 如果 ordered_items 不为 None,直接使用
|
||||||
|
items = None
|
||||||
|
ordering_param = None
|
||||||
if ordered_items is not None:
|
if ordered_items is not None:
|
||||||
items = ordered_items
|
items = ordered_items
|
||||||
elif ordering is not None:
|
elif ordering is not None:
|
||||||
# 检查 ordering 中的值是否是字符串(从 JSON 反序列化时的情况)
|
# 检查 ordering 中的值是否是字符串(从 JSON 反序列化时的情况)
|
||||||
# 如果是字符串,说明这是位置名称,需要让 Plate 自己创建 Well 对象
|
# 如果是字符串,说明这是位置名称,需要让 Plate 自己创建 Well 对象
|
||||||
# 我们只传递位置信息(键),不传递值,使用 ordering 参数
|
# 我们只传递位置信息(键),不传递值,使用 ordering 参数
|
||||||
if ordering and isinstance(next(iter(ordering.values()), None), str):
|
if ordering:
|
||||||
# ordering 的值是字符串,只使用键(位置信息)创建新的 OrderedDict
|
values = list(ordering.values())
|
||||||
# 传递 ordering 参数而不是 ordered_items,让 Plate 自己创建 Well 对象
|
value = values[0]
|
||||||
items = None
|
if isinstance(value, str):
|
||||||
# 使用 ordering 参数,只包含位置信息(键)
|
# ordering 的值是字符串,只使用键(位置信息)创建新的 OrderedDict
|
||||||
ordering_param = collections.OrderedDict((k, None) for k in ordering.keys())
|
# 传递 ordering 参数而不是 ordered_items,让 Plate 自己创建 Well 对象
|
||||||
|
items = None
|
||||||
|
# 使用 ordering 参数,只包含位置信息(键)
|
||||||
|
ordering_param = collections.OrderedDict((k, None) for k in ordering.keys())
|
||||||
|
elif value is None:
|
||||||
|
ordering_param = ordering
|
||||||
else:
|
else:
|
||||||
# ordering 的值是对象(可能是 Well 对象),检查是否有有效的 location
|
# ordering 的值是对象(可能是 Well 对象),检查是否有有效的 location
|
||||||
# 如果是反序列化过程,Well 对象可能没有正确的 location,需要让 Plate 重新创建
|
# 如果是反序列化过程,Well 对象可能没有正确的 location,需要让 Plate 重新创建
|
||||||
@@ -166,37 +207,31 @@ class PRCXI9300Plate(Plate):
|
|||||||
|
|
||||||
# 根据情况传递不同的参数
|
# 根据情况传递不同的参数
|
||||||
if items is not None:
|
if items is not None:
|
||||||
super().__init__(name, size_x, size_y, size_z,
|
super().__init__(
|
||||||
ordered_items=items,
|
name, size_x, size_y, size_z, ordered_items=items, category=category, model=model, **kwargs
|
||||||
category=category,
|
)
|
||||||
model=model, **kwargs)
|
|
||||||
elif ordering_param is not None:
|
elif ordering_param is not None:
|
||||||
# 传递 ordering 参数,让 Plate 自己创建 Well 对象
|
# 传递 ordering 参数,让 Plate 自己创建 Well 对象
|
||||||
super().__init__(name, size_x, size_y, size_z,
|
super().__init__(
|
||||||
ordering=ordering_param,
|
name, size_x, size_y, size_z, ordering=ordering_param, category=category, model=model, **kwargs
|
||||||
category=category,
|
)
|
||||||
model=model, **kwargs)
|
|
||||||
else:
|
else:
|
||||||
super().__init__(name, size_x, size_y, size_z,
|
super().__init__(name, size_x, size_y, size_z, category=category, model=model, **kwargs)
|
||||||
category=category,
|
|
||||||
model=model, **kwargs)
|
|
||||||
|
|
||||||
self._unilabos_state = {}
|
self._unilabos_state = {}
|
||||||
if material_info:
|
if material_info:
|
||||||
self._unilabos_state["Material"] = material_info
|
self._unilabos_state["Material"] = material_info
|
||||||
|
|
||||||
|
|
||||||
def load_state(self, state: Dict[str, Any]) -> None:
|
def load_state(self, state: Dict[str, Any]) -> None:
|
||||||
super().load_state(state)
|
super().load_state(state)
|
||||||
self._unilabos_state = state
|
self._unilabos_state = state
|
||||||
|
|
||||||
|
|
||||||
def serialize_state(self) -> Dict[str, Dict[str, Any]]:
|
def serialize_state(self) -> Dict[str, Dict[str, Any]]:
|
||||||
try:
|
try:
|
||||||
data = super().serialize_state()
|
data = super().serialize_state()
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
data = {}
|
data = {}
|
||||||
if hasattr(self, '_unilabos_state') and self._unilabos_state:
|
if hasattr(self, "_unilabos_state") and self._unilabos_state:
|
||||||
safe_state = {}
|
safe_state = {}
|
||||||
for k, v in self._unilabos_state.items():
|
for k, v in self._unilabos_state.items():
|
||||||
# 如果是 Material 字典,深入检查
|
# 如果是 Material 字典,深入检查
|
||||||
@@ -209,23 +244,32 @@ class PRCXI9300Plate(Plate):
|
|||||||
else:
|
else:
|
||||||
# 打印日志提醒(可选)
|
# 打印日志提醒(可选)
|
||||||
# print(f"Warning: Removing non-serializable key {mk} from {self.name}")
|
# print(f"Warning: Removing non-serializable key {mk} from {self.name}")
|
||||||
pass
|
pass
|
||||||
safe_state[k] = safe_material
|
safe_state[k] = safe_material
|
||||||
# 其他顶层属性也进行类型检查
|
# 其他顶层属性也进行类型检查
|
||||||
elif isinstance(v, (str, int, float, bool, list, dict, type(None))):
|
elif isinstance(v, (str, int, float, bool, list, dict, type(None))):
|
||||||
safe_state[k] = v
|
safe_state[k] = v
|
||||||
|
|
||||||
data.update(safe_state)
|
data.update(safe_state)
|
||||||
return data # 其他顶层属性也进行类型检查
|
return data # 其他顶层属性也进行类型检查
|
||||||
|
|
||||||
|
|
||||||
class PRCXI9300TipRack(TipRack):
|
class PRCXI9300TipRack(TipRack):
|
||||||
""" 专用吸头盒类 """
|
"""专用吸头盒类"""
|
||||||
def __init__(self, name: str, size_x: float, size_y: float, size_z: float,
|
|
||||||
category: str = "tip_rack",
|
def __init__(
|
||||||
ordered_items: collections.OrderedDict = None,
|
self,
|
||||||
ordering: Optional[collections.OrderedDict] = None,
|
name: str,
|
||||||
model: Optional[str] = None,
|
size_x: float,
|
||||||
material_info: Optional[Dict[str, Any]] = None,
|
size_y: float,
|
||||||
**kwargs):
|
size_z: float,
|
||||||
|
category: str = "tip_rack",
|
||||||
|
ordered_items: collections.OrderedDict = None,
|
||||||
|
ordering: Optional[collections.OrderedDict] = None,
|
||||||
|
model: Optional[str] = None,
|
||||||
|
material_info: Optional[Dict[str, Any]] = None,
|
||||||
|
**kwargs,
|
||||||
|
):
|
||||||
# 如果 ordered_items 不为 None,直接使用
|
# 如果 ordered_items 不为 None,直接使用
|
||||||
if ordered_items is not None:
|
if ordered_items is not None:
|
||||||
items = ordered_items
|
items = ordered_items
|
||||||
@@ -253,27 +297,23 @@ class PRCXI9300TipRack(TipRack):
|
|||||||
else:
|
else:
|
||||||
items = None
|
items = None
|
||||||
ordering_param = None
|
ordering_param = None
|
||||||
|
|
||||||
# 根据情况传递不同的参数
|
# 根据情况传递不同的参数
|
||||||
if items is not None:
|
if items is not None:
|
||||||
super().__init__(name, size_x, size_y, size_z,
|
super().__init__(
|
||||||
ordered_items=items,
|
name, size_x, size_y, size_z, ordered_items=items, category=category, model=model, **kwargs
|
||||||
category=category,
|
)
|
||||||
model=model, **kwargs)
|
|
||||||
elif ordering_param is not None:
|
elif ordering_param is not None:
|
||||||
# 传递 ordering 参数,让 TipRack 自己创建 Tip 对象
|
# 传递 ordering 参数,让 TipRack 自己创建 Tip 对象
|
||||||
super().__init__(name, size_x, size_y, size_z,
|
super().__init__(
|
||||||
ordering=ordering_param,
|
name, size_x, size_y, size_z, ordering=ordering_param, category=category, model=model, **kwargs
|
||||||
category=category,
|
)
|
||||||
model=model, **kwargs)
|
|
||||||
else:
|
else:
|
||||||
super().__init__(name, size_x, size_y, size_z,
|
super().__init__(name, size_x, size_y, size_z, category=category, model=model, **kwargs)
|
||||||
category=category,
|
|
||||||
model=model, **kwargs)
|
|
||||||
self._unilabos_state = {}
|
self._unilabos_state = {}
|
||||||
if material_info:
|
if material_info:
|
||||||
self._unilabos_state["Material"] = material_info
|
self._unilabos_state["Material"] = material_info
|
||||||
|
|
||||||
def load_state(self, state: Dict[str, Any]) -> None:
|
def load_state(self, state: Dict[str, Any]) -> None:
|
||||||
super().load_state(state)
|
super().load_state(state)
|
||||||
self._unilabos_state = state
|
self._unilabos_state = state
|
||||||
@@ -283,7 +323,7 @@ class PRCXI9300TipRack(TipRack):
|
|||||||
data = super().serialize_state()
|
data = super().serialize_state()
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
data = {}
|
data = {}
|
||||||
if hasattr(self, '_unilabos_state') and self._unilabos_state:
|
if hasattr(self, "_unilabos_state") and self._unilabos_state:
|
||||||
safe_state = {}
|
safe_state = {}
|
||||||
for k, v in self._unilabos_state.items():
|
for k, v in self._unilabos_state.items():
|
||||||
# 如果是 Material 字典,深入检查
|
# 如果是 Material 字典,深入检查
|
||||||
@@ -296,15 +336,16 @@ class PRCXI9300TipRack(TipRack):
|
|||||||
else:
|
else:
|
||||||
# 打印日志提醒(可选)
|
# 打印日志提醒(可选)
|
||||||
# print(f"Warning: Removing non-serializable key {mk} from {self.name}")
|
# print(f"Warning: Removing non-serializable key {mk} from {self.name}")
|
||||||
pass
|
pass
|
||||||
safe_state[k] = safe_material
|
safe_state[k] = safe_material
|
||||||
# 其他顶层属性也进行类型检查
|
# 其他顶层属性也进行类型检查
|
||||||
elif isinstance(v, (str, int, float, bool, list, dict, type(None))):
|
elif isinstance(v, (str, int, float, bool, list, dict, type(None))):
|
||||||
safe_state[k] = v
|
safe_state[k] = v
|
||||||
|
|
||||||
data.update(safe_state)
|
data.update(safe_state)
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
class PRCXI9300Trash(Trash):
|
class PRCXI9300Trash(Trash):
|
||||||
"""PRCXI 9300 的专用 Trash 类,继承自 Trash。
|
"""PRCXI 9300 的专用 Trash 类,继承自 Trash。
|
||||||
|
|
||||||
@@ -334,7 +375,7 @@ class PRCXI9300Trash(Trash):
|
|||||||
data = super().serialize_state()
|
data = super().serialize_state()
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
data = {}
|
data = {}
|
||||||
if hasattr(self, '_unilabos_state') and self._unilabos_state:
|
if hasattr(self, "_unilabos_state") and self._unilabos_state:
|
||||||
safe_state = {}
|
safe_state = {}
|
||||||
for k, v in self._unilabos_state.items():
|
for k, v in self._unilabos_state.items():
|
||||||
# 如果是 Material 字典,深入检查
|
# 如果是 Material 字典,深入检查
|
||||||
@@ -347,29 +388,37 @@ class PRCXI9300Trash(Trash):
|
|||||||
else:
|
else:
|
||||||
# 打印日志提醒(可选)
|
# 打印日志提醒(可选)
|
||||||
# print(f"Warning: Removing non-serializable key {mk} from {self.name}")
|
# print(f"Warning: Removing non-serializable key {mk} from {self.name}")
|
||||||
pass
|
pass
|
||||||
safe_state[k] = safe_material
|
safe_state[k] = safe_material
|
||||||
# 其他顶层属性也进行类型检查
|
# 其他顶层属性也进行类型检查
|
||||||
elif isinstance(v, (str, int, float, bool, list, dict, type(None))):
|
elif isinstance(v, (str, int, float, bool, list, dict, type(None))):
|
||||||
safe_state[k] = v
|
safe_state[k] = v
|
||||||
|
|
||||||
data.update(safe_state)
|
data.update(safe_state)
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
class PRCXI9300TubeRack(TubeRack):
|
class PRCXI9300TubeRack(TubeRack):
|
||||||
"""
|
"""
|
||||||
专用管架类:用于 EP 管架、试管架等。
|
专用管架类:用于 EP 管架、试管架等。
|
||||||
继承自 PLR 的 TubeRack,并支持注入 material_info (UUID)。
|
继承自 PLR 的 TubeRack,并支持注入 material_info (UUID)。
|
||||||
"""
|
"""
|
||||||
def __init__(self, name: str, size_x: float, size_y: float, size_z: float,
|
|
||||||
category: str = "tube_rack",
|
def __init__(
|
||||||
items: Optional[Dict[str, Any]] = None,
|
self,
|
||||||
ordered_items: Optional[OrderedDict] = None,
|
name: str,
|
||||||
ordering: Optional[OrderedDict] = None,
|
size_x: float,
|
||||||
model: Optional[str] = None,
|
size_y: float,
|
||||||
material_info: Optional[Dict[str, Any]] = None,
|
size_z: float,
|
||||||
**kwargs):
|
category: str = "tube_rack",
|
||||||
|
items: Optional[Dict[str, Any]] = None,
|
||||||
|
ordered_items: Optional[OrderedDict] = None,
|
||||||
|
ordering: Optional[OrderedDict] = None,
|
||||||
|
model: Optional[str] = None,
|
||||||
|
material_info: Optional[Dict[str, Any]] = None,
|
||||||
|
**kwargs,
|
||||||
|
):
|
||||||
|
|
||||||
# 如果 ordered_items 不为 None,直接使用
|
# 如果 ordered_items 不为 None,直接使用
|
||||||
if ordered_items is not None:
|
if ordered_items is not None:
|
||||||
items_to_pass = ordered_items
|
items_to_pass = ordered_items
|
||||||
@@ -380,7 +429,7 @@ class PRCXI9300TubeRack(TubeRack):
|
|||||||
# ordering 的值是字符串,这种情况下我们让 TubeRack 使用默认行为
|
# ordering 的值是字符串,这种情况下我们让 TubeRack 使用默认行为
|
||||||
# 不在初始化时创建 items,而是在 deserialize 后处理
|
# 不在初始化时创建 items,而是在 deserialize 后处理
|
||||||
items_to_pass = None
|
items_to_pass = None
|
||||||
ordering_param = collections.OrderedDict() # 提供空的 ordering 来满足要求
|
ordering_param = collections.OrderedDict((k, None) for k in ordering.keys()) # 提供空的 ordering 来满足要求
|
||||||
# 保存 ordering 信息以便后续处理
|
# 保存 ordering 信息以便后续处理
|
||||||
self._temp_ordering = ordering
|
self._temp_ordering = ordering
|
||||||
else:
|
else:
|
||||||
@@ -401,25 +450,15 @@ class PRCXI9300TubeRack(TubeRack):
|
|||||||
else:
|
else:
|
||||||
items_to_pass = None
|
items_to_pass = None
|
||||||
ordering_param = None
|
ordering_param = None
|
||||||
|
|
||||||
# 根据情况传递不同的参数
|
# 根据情况传递不同的参数
|
||||||
if items_to_pass is not None:
|
if items_to_pass is not None:
|
||||||
super().__init__(name, size_x, size_y, size_z,
|
super().__init__(name, size_x, size_y, size_z, ordered_items=items_to_pass, model=model, **kwargs)
|
||||||
ordered_items=items_to_pass,
|
|
||||||
model=model,
|
|
||||||
**kwargs)
|
|
||||||
elif ordering_param is not None:
|
elif ordering_param is not None:
|
||||||
# 直接调用 ItemizedResource 的构造函数来处理 ordering
|
# 传递 ordering 参数,让 TubeRack 自己创建 Tube 对象
|
||||||
from pylabrobot.resources import ItemizedResource
|
super().__init__(name, size_x, size_y, size_z, ordering=ordering_param, model=model, **kwargs)
|
||||||
ItemizedResource.__init__(self, name, size_x, size_y, size_z,
|
|
||||||
ordering=ordering_param,
|
|
||||||
category=category,
|
|
||||||
model=model,
|
|
||||||
**kwargs)
|
|
||||||
else:
|
else:
|
||||||
super().__init__(name, size_x, size_y, size_z,
|
super().__init__(name, size_x, size_y, size_z, model=model, **kwargs)
|
||||||
model=model,
|
|
||||||
**kwargs)
|
|
||||||
|
|
||||||
self._unilabos_state = {}
|
self._unilabos_state = {}
|
||||||
if material_info:
|
if material_info:
|
||||||
@@ -442,7 +481,7 @@ class PRCXI9300TubeRack(TubeRack):
|
|||||||
|
|
||||||
# 清理临时数据
|
# 清理临时数据
|
||||||
del self._temp_ordering
|
del self._temp_ordering
|
||||||
|
|
||||||
def load_state(self, state: Dict[str, Any]) -> None:
|
def load_state(self, state: Dict[str, Any]) -> None:
|
||||||
"""从给定的状态加载工作台信息。"""
|
"""从给定的状态加载工作台信息。"""
|
||||||
# super().load_state(state)
|
# super().load_state(state)
|
||||||
@@ -453,7 +492,7 @@ class PRCXI9300TubeRack(TubeRack):
|
|||||||
data = super().serialize_state()
|
data = super().serialize_state()
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
data = {}
|
data = {}
|
||||||
if hasattr(self, '_unilabos_state') and self._unilabos_state:
|
if hasattr(self, "_unilabos_state") and self._unilabos_state:
|
||||||
safe_state = {}
|
safe_state = {}
|
||||||
for k, v in self._unilabos_state.items():
|
for k, v in self._unilabos_state.items():
|
||||||
# 如果是 Material 字典,深入检查
|
# 如果是 Material 字典,深入检查
|
||||||
@@ -466,12 +505,12 @@ class PRCXI9300TubeRack(TubeRack):
|
|||||||
else:
|
else:
|
||||||
# 打印日志提醒(可选)
|
# 打印日志提醒(可选)
|
||||||
# print(f"Warning: Removing non-serializable key {mk} from {self.name}")
|
# print(f"Warning: Removing non-serializable key {mk} from {self.name}")
|
||||||
pass
|
pass
|
||||||
safe_state[k] = safe_material
|
safe_state[k] = safe_material
|
||||||
# 其他顶层属性也进行类型检查
|
# 其他顶层属性也进行类型检查
|
||||||
elif isinstance(v, (str, int, float, bool, list, dict, type(None))):
|
elif isinstance(v, (str, int, float, bool, list, dict, type(None))):
|
||||||
safe_state[k] = v
|
safe_state[k] = v
|
||||||
|
|
||||||
data.update(safe_state)
|
data.update(safe_state)
|
||||||
return data
|
return data
|
||||||
class PRCXI9300PlateAdapterSite(ItemizedCarrier):
|
class PRCXI9300PlateAdapterSite(ItemizedCarrier):
|
||||||
@@ -566,24 +605,32 @@ class PRCXI9300PlateAdapterSite(ItemizedCarrier):
|
|||||||
if 'sites' in state:
|
if 'sites' in state:
|
||||||
self.sites = [state['sites']]
|
self.sites = [state['sites']]
|
||||||
|
|
||||||
|
|
||||||
class PRCXI9300PlateAdapter(PlateAdapter):
|
class PRCXI9300PlateAdapter(PlateAdapter):
|
||||||
"""
|
"""
|
||||||
专用板式适配器类:用于承载 Plate 的底座(如 PCR 适配器、磁吸架等)。
|
专用板式适配器类:用于承载 Plate 的底座(如 PCR 适配器、磁吸架等)。
|
||||||
支持注入 material_info (UUID)。
|
支持注入 material_info (UUID)。
|
||||||
"""
|
"""
|
||||||
def __init__(self, name: str, size_x: float, size_y: float, size_z: float,
|
|
||||||
category: str = "plate_adapter",
|
def __init__(
|
||||||
model: Optional[str] = None,
|
self,
|
||||||
material_info: Optional[Dict[str, Any]] = None,
|
name: str,
|
||||||
# 参数给予默认值 (标准96孔板尺寸)
|
size_x: float,
|
||||||
adapter_hole_size_x: float = 127.76,
|
size_y: float,
|
||||||
adapter_hole_size_y: float = 85.48,
|
size_z: float,
|
||||||
adapter_hole_size_z: float = 10.0, # 假设凹槽深度或板子放置高度
|
category: str = "plate_adapter",
|
||||||
dx: Optional[float] = None,
|
model: Optional[str] = None,
|
||||||
dy: Optional[float] = None,
|
material_info: Optional[Dict[str, Any]] = None,
|
||||||
dz: float = 0.0, # 默认Z轴偏移
|
# 参数给予默认值 (标准96孔板尺寸)
|
||||||
**kwargs):
|
adapter_hole_size_x: float = 127.76,
|
||||||
|
adapter_hole_size_y: float = 85.48,
|
||||||
|
adapter_hole_size_z: float = 10.0, # 假设凹槽深度或板子放置高度
|
||||||
|
dx: Optional[float] = None,
|
||||||
|
dy: Optional[float] = None,
|
||||||
|
dz: float = 0.0, # 默认Z轴偏移
|
||||||
|
**kwargs,
|
||||||
|
):
|
||||||
|
|
||||||
# 自动居中计算:如果未指定 dx/dy,则根据适配器尺寸和孔尺寸计算居中位置
|
# 自动居中计算:如果未指定 dx/dy,则根据适配器尺寸和孔尺寸计算居中位置
|
||||||
if dx is None:
|
if dx is None:
|
||||||
dx = (size_x - adapter_hole_size_x) / 2
|
dx = (size_x - adapter_hole_size_x) / 2
|
||||||
@@ -591,20 +638,20 @@ class PRCXI9300PlateAdapter(PlateAdapter):
|
|||||||
dy = (size_y - adapter_hole_size_y) / 2
|
dy = (size_y - adapter_hole_size_y) / 2
|
||||||
|
|
||||||
super().__init__(
|
super().__init__(
|
||||||
name=name,
|
name=name,
|
||||||
size_x=size_x,
|
size_x=size_x,
|
||||||
size_y=size_y,
|
size_y=size_y,
|
||||||
size_z=size_z,
|
size_z=size_z,
|
||||||
dx=dx,
|
dx=dx,
|
||||||
dy=dy,
|
dy=dy,
|
||||||
dz=dz,
|
dz=dz,
|
||||||
adapter_hole_size_x=adapter_hole_size_x,
|
adapter_hole_size_x=adapter_hole_size_x,
|
||||||
adapter_hole_size_y=adapter_hole_size_y,
|
adapter_hole_size_y=adapter_hole_size_y,
|
||||||
adapter_hole_size_z=adapter_hole_size_z,
|
adapter_hole_size_z=adapter_hole_size_z,
|
||||||
model=model,
|
model=model,
|
||||||
**kwargs
|
**kwargs,
|
||||||
)
|
)
|
||||||
|
|
||||||
self._unilabos_state = {}
|
self._unilabos_state = {}
|
||||||
if material_info:
|
if material_info:
|
||||||
self._unilabos_state["Material"] = material_info
|
self._unilabos_state["Material"] = material_info
|
||||||
@@ -614,7 +661,7 @@ class PRCXI9300PlateAdapter(PlateAdapter):
|
|||||||
data = super().serialize_state()
|
data = super().serialize_state()
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
data = {}
|
data = {}
|
||||||
if hasattr(self, '_unilabos_state') and self._unilabos_state:
|
if hasattr(self, "_unilabos_state") and self._unilabos_state:
|
||||||
safe_state = {}
|
safe_state = {}
|
||||||
for k, v in self._unilabos_state.items():
|
for k, v in self._unilabos_state.items():
|
||||||
# 如果是 Material 字典,深入检查
|
# 如果是 Material 字典,深入检查
|
||||||
@@ -627,15 +674,16 @@ class PRCXI9300PlateAdapter(PlateAdapter):
|
|||||||
else:
|
else:
|
||||||
# 打印日志提醒(可选)
|
# 打印日志提醒(可选)
|
||||||
# print(f"Warning: Removing non-serializable key {mk} from {self.name}")
|
# print(f"Warning: Removing non-serializable key {mk} from {self.name}")
|
||||||
pass
|
pass
|
||||||
safe_state[k] = safe_material
|
safe_state[k] = safe_material
|
||||||
# 其他顶层属性也进行类型检查
|
# 其他顶层属性也进行类型检查
|
||||||
elif isinstance(v, (str, int, float, bool, list, dict, type(None))):
|
elif isinstance(v, (str, int, float, bool, list, dict, type(None))):
|
||||||
safe_state[k] = v
|
safe_state[k] = v
|
||||||
|
|
||||||
data.update(safe_state)
|
data.update(safe_state)
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
class PRCXI9300Handler(LiquidHandlerAbstract):
|
class PRCXI9300Handler(LiquidHandlerAbstract):
|
||||||
support_touch_tip = False
|
support_touch_tip = False
|
||||||
|
|
||||||
@@ -751,9 +799,14 @@ class PRCXI9300Handler(LiquidHandlerAbstract):
|
|||||||
super().post_init(ros_node)
|
super().post_init(ros_node)
|
||||||
self._unilabos_backend.post_init(ros_node)
|
self._unilabos_backend.post_init(ros_node)
|
||||||
|
|
||||||
def set_liquid(self, wells: list[Well], liquid_names: list[str], volumes: list[float]) -> SimpleReturn:
|
def set_liquid(self, wells: list[Well], liquid_names: list[str], volumes: list[float]) -> SetLiquidReturn:
|
||||||
return super().set_liquid(wells, liquid_names, volumes)
|
return super().set_liquid(wells, liquid_names, volumes)
|
||||||
|
|
||||||
|
def set_liquid_from_plate(
|
||||||
|
self, plate: ResourceSlot, well_names: list[str], liquid_names: list[str], volumes: list[float]
|
||||||
|
) -> SetLiquidFromPlateReturn:
|
||||||
|
return super().set_liquid_from_plate(plate, well_names, liquid_names, volumes)
|
||||||
|
|
||||||
def set_group(self, group_name: str, wells: List[Well], volumes: List[float]):
|
def set_group(self, group_name: str, wells: List[Well], volumes: List[float]):
|
||||||
return super().set_group(group_name, wells, volumes)
|
return super().set_group(group_name, wells, volumes)
|
||||||
|
|
||||||
@@ -873,8 +926,11 @@ class PRCXI9300Handler(LiquidHandlerAbstract):
|
|||||||
mix_liquid_height: Optional[float] = None,
|
mix_liquid_height: Optional[float] = None,
|
||||||
delays: Optional[List[int]] = None,
|
delays: Optional[List[int]] = None,
|
||||||
none_keys: List[str] = [],
|
none_keys: List[str] = [],
|
||||||
):
|
) -> TransferLiquidReturn:
|
||||||
return await super().transfer_liquid(
|
if self.step_mode:
|
||||||
|
self._unilabos_backend.create_protocol(f"step_mode_protocol_{time.time()}")
|
||||||
|
|
||||||
|
res = await super().transfer_liquid(
|
||||||
sources,
|
sources,
|
||||||
targets,
|
targets,
|
||||||
tip_racks,
|
tip_racks,
|
||||||
@@ -896,7 +952,33 @@ class PRCXI9300Handler(LiquidHandlerAbstract):
|
|||||||
mix_liquid_height=mix_liquid_height,
|
mix_liquid_height=mix_liquid_height,
|
||||||
delays=delays,
|
delays=delays,
|
||||||
none_keys=none_keys,
|
none_keys=none_keys,
|
||||||
)
|
)
|
||||||
|
self._unilabos_backend.run_protocol()
|
||||||
|
return res
|
||||||
|
else:
|
||||||
|
return await super().transfer_liquid(
|
||||||
|
sources,
|
||||||
|
targets,
|
||||||
|
tip_racks,
|
||||||
|
use_channels=use_channels,
|
||||||
|
asp_vols=asp_vols,
|
||||||
|
dis_vols=dis_vols,
|
||||||
|
asp_flow_rates=asp_flow_rates,
|
||||||
|
dis_flow_rates=dis_flow_rates,
|
||||||
|
offsets=offsets,
|
||||||
|
touch_tip=touch_tip,
|
||||||
|
liquid_height=liquid_height,
|
||||||
|
blow_out_air_volume=blow_out_air_volume,
|
||||||
|
spread=spread,
|
||||||
|
is_96_well=is_96_well,
|
||||||
|
mix_stage=mix_stage,
|
||||||
|
mix_times=mix_times,
|
||||||
|
mix_vol=mix_vol,
|
||||||
|
mix_rate=mix_rate,
|
||||||
|
mix_liquid_height=mix_liquid_height,
|
||||||
|
delays=delays,
|
||||||
|
none_keys=none_keys,
|
||||||
|
)
|
||||||
|
|
||||||
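In step mode the handler above opens a fresh protocol before delegating to the parent transfer_liquid, lets the backend queue every step, and then flushes them with run_protocol; otherwise it falls through to the parent unchanged. A condensed, runnable sketch of that wrap-and-flush pattern, with the backend replaced by a recording stub (the stub and helper names are assumptions, the create_protocol/run_protocol calls mirror the diff):

```python
import asyncio
import time

class _FakeBackend:
    """Stand-in for PRCXI9300Backend: records protocol boundaries instead of driving hardware."""
    def __init__(self):
        self.events = []
    def create_protocol(self, name):
        self.events.append(("create", name))
    def run_protocol(self):
        self.events.append(("run",))

async def transfer_with_step_mode(backend, step_mode, do_transfer):
    """Wrap a transfer coroutine in a protocol when step_mode is on, else run it directly."""
    if step_mode:
        backend.create_protocol(f"step_mode_protocol_{time.time()}")
        result = await do_transfer()
        backend.run_protocol()  # flush every queued step in one batch
        return result
    return await do_transfer()

async def _demo():
    backend = _FakeBackend()
    await transfer_with_step_mode(backend, True, lambda: asyncio.sleep(0, result="ok"))
    print(backend.events)  # [('create', 'step_mode_protocol_...'), ('run',)]

asyncio.run(_demo())
```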
async def custom_delay(self, seconds=0, msg=None):
|
async def custom_delay(self, seconds=0, msg=None):
|
||||||
return await super().custom_delay(seconds, msg)
|
return await super().custom_delay(seconds, msg)
|
||||||
@@ -1011,8 +1093,14 @@ class PRCXI9300Handler(LiquidHandlerAbstract):
|
|||||||
async def shaker_action(self, time: int, module_no: int, amplitude: int, is_wait: bool):
|
async def shaker_action(self, time: int, module_no: int, amplitude: int, is_wait: bool):
|
||||||
return await self._unilabos_backend.shaker_action(time, module_no, amplitude, is_wait)
|
return await self._unilabos_backend.shaker_action(time, module_no, amplitude, is_wait)
|
||||||
|
|
||||||
|
async def magnetic_action(self, time: int, module_no: int, height: int, is_wait: bool):
|
||||||
|
return await self._unilabos_backend.magnetic_action(time, module_no, height, is_wait)
|
||||||
|
|
||||||
|
async def shaking_incubation_action(self, time: int, module_no: int, amplitude: int, is_wait: bool, temperature: int):
|
||||||
|
return await self._unilabos_backend.shaking_incubation_action(time, module_no, amplitude, is_wait, temperature)
|
||||||
async def heater_action(self, temperature: float, time: int):
|
async def heater_action(self, temperature: float, time: int):
|
||||||
return await self._unilabos_backend.heater_action(temperature, time)
|
return await self._unilabos_backend.heater_action(temperature, time)
|
||||||
|
|
||||||
async def move_plate(
|
async def move_plate(
|
||||||
self,
|
self,
|
||||||
plate: Plate,
|
plate: Plate,
|
||||||
@@ -1035,7 +1123,7 @@ class PRCXI9300Handler(LiquidHandlerAbstract):
|
|||||||
drop_direction,
|
drop_direction,
|
||||||
pickup_direction,
|
pickup_direction,
|
||||||
pickup_distance_from_top,
|
pickup_distance_from_top,
|
||||||
target_plate_number = to,
|
target_plate_number=to,
|
||||||
**backend_kwargs,
|
**backend_kwargs,
|
||||||
)
|
)
|
||||||
plate.unassign()
|
plate.unassign()
|
||||||
@@ -1045,6 +1133,7 @@ class PRCXI9300Handler(LiquidHandlerAbstract):
|
|||||||
})
|
})
|
||||||
return res
|
return res
|
||||||
|
|
||||||
|
|
||||||
class PRCXI9300Backend(LiquidHandlerBackend):
|
class PRCXI9300Backend(LiquidHandlerBackend):
|
||||||
"""PRCXI 9300 的后端实现,继承自 LiquidHandlerBackend。
|
"""PRCXI 9300 的后端实现,继承自 LiquidHandlerBackend。
|
||||||
|
|
||||||
@@ -1116,19 +1205,39 @@ class PRCXI9300Backend(LiquidHandlerBackend):
|
|||||||
self.steps_todo_list.append(step)
|
self.steps_todo_list.append(step)
|
||||||
return step
|
return step
|
||||||
|
|
||||||
|
async def shaking_incubation_action(self, time: int, module_no: int, amplitude: int, is_wait: bool, temperature: int):
|
||||||
|
step = self.api_client.shaking_incubation_action(
|
||||||
|
time=time,
|
||||||
|
module_no=module_no,
|
||||||
|
amplitude=amplitude,
|
||||||
|
is_wait=is_wait,
|
||||||
|
temperature=temperature,
|
||||||
|
)
|
||||||
|
self.steps_todo_list.append(step)
|
||||||
|
return step
|
||||||
|
|
||||||
|
async def magnetic_action(self, time: int, module_no: int, height: int, is_wait: bool):
|
||||||
|
step = self.api_client.magnetic_action(
|
||||||
|
time=time,
|
||||||
|
module_no=module_no,
|
||||||
|
height=height,
|
||||||
|
is_wait=is_wait,
|
||||||
|
)
|
||||||
|
self.steps_todo_list.append(step)
|
||||||
|
return step
|
||||||
|
|
||||||
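Both new backend actions follow the same pattern: the API client builds a plain step dictionary, the backend appends it to steps_todo_list and returns it, and nothing is sent to the instrument until run_protocol executes the queue. A minimal sketch of that accumulation, using the Magnetic step shape shown further down in the diff (the class here is illustrative, not the real backend):

```python
from typing import Any, Dict, List

class StepQueueSketch:
    """Illustrative queue of PRCXI-style step dicts."""

    def __init__(self) -> None:
        self.steps_todo_list: List[Dict[str, Any]] = []

    def magnetic_step(self, time: int, module_no: int, height: int, is_wait: bool) -> Dict[str, Any]:
        # Same field layout as PRCXI9300Api.magnetic_action in this diff.
        step = {
            "StepAxis": "Left",
            "Function": "Magnetic",
            "AssistFun1": time,
            "AssistFun2": module_no,
            "AssistFun3": height,
            "AssistFun4": is_wait,
        }
        self.steps_todo_list.append(step)
        return step

queue = StepQueueSketch()
queue.magnetic_step(time=30, module_no=2, height=5, is_wait=True)
print(len(queue.steps_todo_list))  # 1
```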
async def pick_up_resource(self, pickup: ResourcePickup, **backend_kwargs):
|
async def pick_up_resource(self, pickup: ResourcePickup, **backend_kwargs):
|
||||||
|
|
||||||
resource=pickup.resource
|
resource = pickup.resource
|
||||||
offset=pickup.offset
|
offset = pickup.offset
|
||||||
pickup_distance_from_top=pickup.pickup_distance_from_top
|
pickup_distance_from_top = pickup.pickup_distance_from_top
|
||||||
direction=pickup.direction
|
direction = pickup.direction
|
||||||
|
|
||||||
plate_number = int(resource.parent.name.replace("T", ""))
|
plate_number = int(resource.parent.name.replace("T", ""))
|
||||||
is_whole_plate = True
|
is_whole_plate = True
|
||||||
balance_height = 0
|
balance_height = 0
|
||||||
step = self.api_client.clamp_jaw_pick_up(plate_number, is_whole_plate, balance_height)
|
step = self.api_client.clamp_jaw_pick_up(plate_number, is_whole_plate, balance_height)
|
||||||
|
|
||||||
self.steps_todo_list.append(step)
|
self.steps_todo_list.append(step)
|
||||||
return step
|
return step
|
||||||
|
|
||||||
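pick_up_resource derives the worktable slot number from the parent resource's name by stripping a leading "T", so it assumes slot names such as "T1"..."T12". A small sketch with that assumption made explicit (the guard clause is an addition for illustration, the real code converts directly):

```python
def plate_number_from_slot(slot_name: str) -> int:
    """Assumes worktable slots are named 'T<number>' (e.g. 'T3'), as in the backend above."""
    if not slot_name.startswith("T"):
        raise ValueError(f"unexpected slot name: {slot_name!r}")
    return int(slot_name.replace("T", ""))

print(plate_number_from_slot("T3"))  # 3
```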
@@ -1147,7 +1256,6 @@ class PRCXI9300Backend(LiquidHandlerBackend):
|
|||||||
self.steps_todo_list.append(step)
|
self.steps_todo_list.append(step)
|
||||||
return step
|
return step
|
||||||
|
|
||||||
|
|
||||||
async def heater_action(self, temperature: float, time: int):
|
async def heater_action(self, temperature: float, time: int):
|
||||||
print(f"\n\nHeater action: temperature={temperature}, time={time}\n\n")
|
print(f"\n\nHeater action: temperature={temperature}, time={time}\n\n")
|
||||||
# return await self.api_client.heater_action(temperature, time)
|
# return await self.api_client.heater_action(temperature, time)
|
||||||
@@ -1160,8 +1268,7 @@ class PRCXI9300Backend(LiquidHandlerBackend):
|
|||||||
protocol_name = f"protocol_{time.time()}"
|
protocol_name = f"protocol_{time.time()}"
|
||||||
self.protocol_name = protocol_name
|
self.protocol_name = protocol_name
|
||||||
self.steps_todo_list = []
|
self.steps_todo_list = []
|
||||||
matrices = self.api_client.matrix_by_id("5de524d0-3f95-406c-86dd-f83626ebc7cb")["WorkTablets"]
|
|
||||||
|
|
||||||
if not len(self.matrix_id):
|
if not len(self.matrix_id):
|
||||||
self.matrix_id = str(uuid.uuid4())
|
self.matrix_id = str(uuid.uuid4())
|
||||||
|
|
||||||
@@ -1218,23 +1325,23 @@ class PRCXI9300Backend(LiquidHandlerBackend):
|
|||||||
error_code = self.api_client.get_error_code()
|
error_code = self.api_client.get_error_code()
|
||||||
if error_code:
|
if error_code:
|
||||||
print(f"PRCXI9300 error code detected: {error_code}")
|
print(f"PRCXI9300 error code detected: {error_code}")
|
||||||
|
|
||||||
# 清除错误代码
|
# 清除错误代码
|
||||||
self.api_client.clear_error_code()
|
self.api_client.clear_error_code()
|
||||||
print("PRCXI9300 error code cleared.")
|
print("PRCXI9300 error code cleared.")
|
||||||
self.api_client.call("IAutomation", "Stop")
|
self.api_client.call("IAutomation", "Stop")
|
||||||
# 执行重置
|
# 执行重置
|
||||||
# print("Starting PRCXI9300 reset...")
|
print("Starting PRCXI9300 reset...")
|
||||||
# self.api_client.call("IAutomation", "Reset")
|
self.api_client.call("IAutomation", "Reset")
|
||||||
|
|
||||||
# # 检查重置状态并等待完成
|
# 检查重置状态并等待完成
|
||||||
# while not self.is_reset_ok:
|
while not self.is_reset_ok:
|
||||||
# print("Waiting for PRCXI9300 to reset...")
|
print("Waiting for PRCXI9300 to reset...")
|
||||||
# if hasattr(self, '_ros_node') and self._ros_node is not None:
|
if hasattr(self, "_ros_node") and self._ros_node is not None:
|
||||||
# await self._ros_node.sleep(1)
|
await self._ros_node.sleep(1)
|
||||||
# else:
|
else:
|
||||||
# await asyncio.sleep(1)
|
await asyncio.sleep(1)
|
||||||
# print("PRCXI9300 reset successfully.")
|
print("PRCXI9300 reset successfully.")
|
||||||
|
|
||||||
self.api_client.update_clamp_jaw_position(self.matrix_id, self.plate_positions)
|
self.api_client.update_clamp_jaw_position(self.matrix_id, self.plate_positions)
|
||||||
|
|
||||||
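This hunk re-enables the reset sequence that was previously commented out: clear the error code, stop and reset the IAutomation service, then poll is_reset_ok once per second, sleeping through the ROS node when one is attached and through asyncio otherwise. A standalone sketch of that wait loop; the reset flag is injected as a callable here, whereas the real code reads self.is_reset_ok:

```python
import asyncio

async def wait_until_reset(is_reset_ok, ros_node=None, poll_s: float = 1.0) -> None:
    """Poll a reset flag, preferring the ROS node's sleep when available."""
    while not is_reset_ok():
        print("Waiting for PRCXI9300 to reset...")
        if ros_node is not None:
            await ros_node.sleep(poll_s)
        else:
            await asyncio.sleep(poll_s)
    print("PRCXI9300 reset successfully.")

# Toy run: the flag flips to True after two polls.
_calls = {"n": 0}
def _flag() -> bool:
    _calls["n"] += 1
    return _calls["n"] > 2

asyncio.run(wait_until_reset(_flag, ros_node=None, poll_s=0.01))
```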
@@ -1251,7 +1358,7 @@ class PRCXI9300Backend(LiquidHandlerBackend):
|
|||||||
"""Pick up tips from the specified resource."""
|
"""Pick up tips from the specified resource."""
|
||||||
# INSERT_YOUR_CODE
|
# INSERT_YOUR_CODE
|
||||||
# Ensure use_channels is converted to a list of ints if it's an array
|
# Ensure use_channels is converted to a list of ints if it's an array
|
||||||
if hasattr(use_channels, 'tolist'):
|
if hasattr(use_channels, "tolist"):
|
||||||
_use_channels = use_channels.tolist()
|
_use_channels = use_channels.tolist()
|
||||||
else:
|
else:
|
||||||
_use_channels = list(use_channels) if use_channels is not None else None
|
_use_channels = list(use_channels) if use_channels is not None else None
|
||||||
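The same normalization appears in pick_up_tips, drop_tips, aspirate and dispense: use_channels may arrive as a NumPy array or a plain sequence, so it is converted with tolist() when available and with list() otherwise, preserving None. A sketch of that logic as a free helper (the diff inlines it in each method):

```python
from typing import List, Optional

def normalize_channels(use_channels) -> Optional[List[int]]:
    """Coerce a numpy array or sequence of channel indices to a plain list, keeping None as-is."""
    if use_channels is None:
        return None
    if hasattr(use_channels, "tolist"):  # numpy arrays expose tolist()
        return use_channels.tolist()
    return list(use_channels)

print(normalize_channels([0, 1, 2]))  # [0, 1, 2]
print(normalize_channels(None))       # None
```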
@@ -1305,7 +1412,7 @@ class PRCXI9300Backend(LiquidHandlerBackend):
|
|||||||
|
|
||||||
async def drop_tips(self, ops: List[Drop], use_channels: List[int] = None):
|
async def drop_tips(self, ops: List[Drop], use_channels: List[int] = None):
|
||||||
"""Pick up tips from the specified resource."""
|
"""Pick up tips from the specified resource."""
|
||||||
if hasattr(use_channels, 'tolist'):
|
if hasattr(use_channels, "tolist"):
|
||||||
_use_channels = use_channels.tolist()
|
_use_channels = use_channels.tolist()
|
||||||
else:
|
else:
|
||||||
_use_channels = list(use_channels) if use_channels is not None else None
|
_use_channels = list(use_channels) if use_channels is not None else None
|
||||||
@@ -1388,7 +1495,7 @@ class PRCXI9300Backend(LiquidHandlerBackend):
|
|||||||
none_keys: List[str] = [],
|
none_keys: List[str] = [],
|
||||||
):
|
):
|
||||||
"""Mix liquid in the specified resources."""
|
"""Mix liquid in the specified resources."""
|
||||||
|
|
||||||
plate_indexes = []
|
plate_indexes = []
|
||||||
for op in targets:
|
for op in targets:
|
||||||
deck = op.parent.parent.parent
|
deck = op.parent.parent.parent
|
||||||
@@ -1431,7 +1538,7 @@ class PRCXI9300Backend(LiquidHandlerBackend):
|
|||||||
|
|
||||||
async def aspirate(self, ops: List[SingleChannelAspiration], use_channels: List[int] = None):
|
async def aspirate(self, ops: List[SingleChannelAspiration], use_channels: List[int] = None):
|
||||||
"""Aspirate liquid from the specified resources."""
|
"""Aspirate liquid from the specified resources."""
|
||||||
if hasattr(use_channels, 'tolist'):
|
if hasattr(use_channels, "tolist"):
|
||||||
_use_channels = use_channels.tolist()
|
_use_channels = use_channels.tolist()
|
||||||
else:
|
else:
|
||||||
_use_channels = list(use_channels) if use_channels is not None else None
|
_use_channels = list(use_channels) if use_channels is not None else None
|
||||||
@@ -1488,7 +1595,7 @@ class PRCXI9300Backend(LiquidHandlerBackend):
|
|||||||
|
|
||||||
async def dispense(self, ops: List[SingleChannelDispense], use_channels: List[int] = None):
|
async def dispense(self, ops: List[SingleChannelDispense], use_channels: List[int] = None):
|
||||||
"""Dispense liquid into the specified resources."""
|
"""Dispense liquid into the specified resources."""
|
||||||
if hasattr(use_channels, 'tolist'):
|
if hasattr(use_channels, "tolist"):
|
||||||
_use_channels = use_channels.tolist()
|
_use_channels = use_channels.tolist()
|
||||||
else:
|
else:
|
||||||
_use_channels = list(use_channels) if use_channels is not None else None
|
_use_channels = list(use_channels) if use_channels is not None else None
|
||||||
@@ -1653,10 +1760,10 @@ class PRCXI9300Api:
|
|||||||
start = False
|
start = False
|
||||||
while not success:
|
while not success:
|
||||||
status = self.step_state_list()
|
status = self.step_state_list()
|
||||||
if len(status) == 1:
|
|
||||||
start = True
|
|
||||||
if status is None:
|
if status is None:
|
||||||
break
|
break
|
||||||
|
if len(status) == 1:
|
||||||
|
start = True
|
||||||
if len(status) == 0:
|
if len(status) == 0:
|
||||||
break
|
break
|
||||||
if status[-1]["State"] == 2 and start:
|
if status[-1]["State"] == 2 and start:
|
||||||
@@ -1669,7 +1776,6 @@ class PRCXI9300Api:
|
|||||||
time.sleep(1)
|
time.sleep(1)
|
||||||
return success
|
return success
|
||||||
|
|
||||||
|
|
||||||
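The reordering in this hunk matters: step_state_list() can return None, so the None check must run before len(status) is evaluated; with the old ordering a dropped connection raised TypeError instead of exiting the loop. A distilled, runnable sketch of the corrected ordering (success is set exactly as in the loop above; the polling source is injected for the toy run):

```python
import time

def wait_for_last_step(step_state_list, poll_s: float = 1.0) -> bool:
    """Poll step states until the last step reports State == 2, bailing out safely on None/empty."""
    success = False
    start = False
    while not success:
        status = step_state_list()
        if status is None:        # connection lost / no data: give up first
            break
        if len(status) == 1:      # only safe to call len() after the None check
            start = True
        if len(status) == 0:
            break
        if status[-1]["State"] == 2 and start:
            success = True
        time.sleep(poll_s)
    return success

states = iter([None])
print(wait_for_last_step(lambda: next(states), poll_s=0))  # False
```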
def call(self, service: str, method: str, params: Optional[list] = None) -> Any:
|
def call(self, service: str, method: str, params: Optional[list] = None) -> Any:
|
||||||
payload = json.dumps(
|
payload = json.dumps(
|
||||||
{"ServiceName": service, "MethodName": method, "Paramters": params or []}, separators=(",", ":")
|
{"ServiceName": service, "MethodName": method, "Paramters": params or []}, separators=(",", ":")
|
||||||
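call() serializes each request as a compact JSON object with exactly the keys the instrument expects; note that the "Paramters" spelling is part of the device protocol and is kept deliberately. A sketch of just the payload construction (helper name is hypothetical, the json.dumps call matches the diff):

```python
import json
from typing import Optional

def build_payload(service: str, method: str, params: Optional[list] = None) -> str:
    """Compact JSON body as assembled by PRCXI9300Api.call; 'Paramters' spelling is the device's."""
    return json.dumps(
        {"ServiceName": service, "MethodName": method, "Paramters": params or []},
        separators=(",", ":"),
    )

print(build_payload("IAutomation", "Stop"))
# {"ServiceName":"IAutomation","MethodName":"Stop","Paramters":[]}
```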
@@ -1803,7 +1909,7 @@ class PRCXI9300Api:
|
|||||||
assist_fun5: str = "",
|
assist_fun5: str = "",
|
||||||
liquid_method: str = "NormalDispense",
|
liquid_method: str = "NormalDispense",
|
||||||
axis: str = "Left",
|
axis: str = "Left",
|
||||||
) -> Dict[str, Any]:
|
) -> Dict[str, Any]:
|
||||||
return {
|
return {
|
||||||
"StepAxis": axis,
|
"StepAxis": axis,
|
||||||
"Function": "Imbibing",
|
"Function": "Imbibing",
|
||||||
@@ -1881,7 +1987,7 @@ class PRCXI9300Api:
|
|||||||
assist_fun5: str = "",
|
assist_fun5: str = "",
|
||||||
liquid_method: str = "NormalDispense",
|
liquid_method: str = "NormalDispense",
|
||||||
axis: str = "Left",
|
axis: str = "Left",
|
||||||
) -> Dict[str, Any]:
|
) -> Dict[str, Any]:
|
||||||
return {
|
return {
|
||||||
"StepAxis": axis,
|
"StepAxis": axis,
|
||||||
"Function": "Blending",
|
"Function": "Blending",
|
||||||
@@ -1941,11 +2047,11 @@ class PRCXI9300Api:
|
|||||||
"LiquidDispensingMethod": liquid_method,
|
"LiquidDispensingMethod": liquid_method,
|
||||||
}
|
}
|
||||||
|
|
||||||
def clamp_jaw_pick_up(self,
|
def clamp_jaw_pick_up(
|
||||||
|
self,
|
||||||
plate_no: int,
|
plate_no: int,
|
||||||
is_whole_plate: bool,
|
is_whole_plate: bool,
|
||||||
balance_height: int,
|
balance_height: int,
|
||||||
|
|
||||||
) -> Dict[str, Any]:
|
) -> Dict[str, Any]:
|
||||||
return {
|
return {
|
||||||
"StepAxis": "ClampingJaw",
|
"StepAxis": "ClampingJaw",
|
||||||
@@ -1955,7 +2061,7 @@ class PRCXI9300Api:
|
|||||||
"HoleRow": 1,
|
"HoleRow": 1,
|
||||||
"HoleCol": 1,
|
"HoleCol": 1,
|
||||||
"BalanceHeight": balance_height,
|
"BalanceHeight": balance_height,
|
||||||
"PlateOrHoleNum": f"T{plate_no}"
|
"PlateOrHoleNum": f"T{plate_no}",
|
||||||
}
|
}
|
||||||
|
|
||||||
def clamp_jaw_drop(
|
def clamp_jaw_drop(
|
||||||
@@ -1963,7 +2069,6 @@ class PRCXI9300Api:
|
|||||||
plate_no: int,
|
plate_no: int,
|
||||||
is_whole_plate: bool,
|
is_whole_plate: bool,
|
||||||
balance_height: int,
|
balance_height: int,
|
||||||
|
|
||||||
) -> Dict[str, Any]:
|
) -> Dict[str, Any]:
|
||||||
return {
|
return {
|
||||||
"StepAxis": "ClampingJaw",
|
"StepAxis": "ClampingJaw",
|
||||||
@@ -1973,7 +2078,7 @@ class PRCXI9300Api:
|
|||||||
"HoleRow": 1,
|
"HoleRow": 1,
|
||||||
"HoleCol": 1,
|
"HoleCol": 1,
|
||||||
"BalanceHeight": balance_height,
|
"BalanceHeight": balance_height,
|
||||||
"PlateOrHoleNum": f"T{plate_no}"
|
"PlateOrHoleNum": f"T{plate_no}",
|
||||||
}
|
}
|
||||||
|
|
||||||
def shaker_action(self, time: int, module_no: int, amplitude: int, is_wait: bool):
|
def shaker_action(self, time: int, module_no: int, amplitude: int, is_wait: bool):
|
||||||
@@ -1986,6 +2091,27 @@ class PRCXI9300Api:
|
|||||||
"AssistFun4": is_wait,
|
"AssistFun4": is_wait,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
def shaking_incubation_action(self, time: int, module_no: int, amplitude: int, is_wait: bool, temperature: int):
|
||||||
|
return {
|
||||||
|
"StepAxis": "Left",
|
||||||
|
"Function": "Shaking_Incubation",
|
||||||
|
"AssistFun1": time,
|
||||||
|
"AssistFun2": module_no,
|
||||||
|
"AssistFun3": amplitude,
|
||||||
|
"AssistFun4": is_wait,
|
||||||
|
"AssistFun5": temperature,
|
||||||
|
}
|
||||||
|
|
||||||
|
def magnetic_action(self, time: int, module_no: int, height: int, is_wait: bool):
|
||||||
|
return {
|
||||||
|
"StepAxis": "Left",
|
||||||
|
"Function": "Magnetic",
|
||||||
|
"AssistFun1": time,
|
||||||
|
"AssistFun2": module_no,
|
||||||
|
"AssistFun3": height,
|
||||||
|
"AssistFun4": is_wait,
|
||||||
|
}
|
||||||
|
|
||||||
class DefaultLayout:
|
class DefaultLayout:
|
||||||
|
|
||||||
def __init__(self, product_name: str = "PRCXI9300"):
|
def __init__(self, product_name: str = "PRCXI9300"):
|
||||||
@@ -2364,7 +2490,9 @@ if __name__ == "__main__":
|
|||||||
size_y=50,
|
size_y=50,
|
||||||
size_z=10,
|
size_z=10,
|
||||||
category="tip_rack",
|
category="tip_rack",
|
||||||
ordered_items=collections.OrderedDict({k: f"{child_prefix}_{k}" for k, v in tip_racks["ordering"].items()}),
|
ordered_items=collections.OrderedDict(
|
||||||
|
{k: f"{child_prefix}_{k}" for k, v in tip_racks["ordering"].items()}
|
||||||
|
),
|
||||||
)
|
)
|
||||||
tip_rack_serialized = tip_rack.serialize()
|
tip_rack_serialized = tip_rack.serialize()
|
||||||
tip_rack_serialized["parent_name"] = deck.name
|
tip_rack_serialized["parent_name"] = deck.name
|
||||||
@@ -2559,43 +2687,37 @@ if __name__ == "__main__":
|
|||||||
|
|
||||||
A = tree_to_list([resource_plr_to_ulab(deck)])
|
A = tree_to_list([resource_plr_to_ulab(deck)])
|
||||||
with open("deck.json", "w", encoding="utf-8") as f:
|
with open("deck.json", "w", encoding="utf-8") as f:
|
||||||
A.insert(0, {
|
A.insert(
|
||||||
"id": "PRCXI",
|
0,
|
||||||
"name": "PRCXI",
|
{
|
||||||
"parent": None,
|
"id": "PRCXI",
|
||||||
"type": "device",
|
"name": "PRCXI",
|
||||||
"class": "liquid_handler.prcxi",
|
"parent": None,
|
||||||
"position": {
|
"type": "device",
|
||||||
"x": 0,
|
"class": "liquid_handler.prcxi",
|
||||||
"y": 0,
|
"position": {"x": 0, "y": 0, "z": 0},
|
||||||
"z": 0
|
"config": {
|
||||||
},
|
"deck": {
|
||||||
"config": {
|
"_resource_child_name": "PRCXI_Deck",
|
||||||
"deck": {
|
"_resource_type": "unilabos.devices.liquid_handling.prcxi.prcxi:PRCXI9300Deck",
|
||||||
"_resource_child_name": "PRCXI_Deck",
|
},
|
||||||
"_resource_type": "unilabos.devices.liquid_handling.prcxi.prcxi:PRCXI9300Deck"
|
"host": "192.168.0.121",
|
||||||
|
"port": 9999,
|
||||||
|
"timeout": 10.0,
|
||||||
|
"axis": "Right",
|
||||||
|
"channel_num": 1,
|
||||||
|
"setup": False,
|
||||||
|
"debug": True,
|
||||||
|
"simulator": True,
|
||||||
|
"matrix_id": "5de524d0-3f95-406c-86dd-f83626ebc7cb",
|
||||||
|
"is_9320": True,
|
||||||
},
|
},
|
||||||
"host": "192.168.0.121",
|
"data": {},
|
||||||
"port": 9999,
|
"children": ["PRCXI_Deck"],
|
||||||
"timeout": 10.0,
|
|
||||||
"axis": "Right",
|
|
||||||
"channel_num": 1,
|
|
||||||
"setup": False,
|
|
||||||
"debug": True,
|
|
||||||
"simulator": True,
|
|
||||||
"matrix_id": "5de524d0-3f95-406c-86dd-f83626ebc7cb",
|
|
||||||
"is_9320": True
|
|
||||||
},
|
},
|
||||||
"data": {},
|
)
|
||||||
"children": [
|
|
||||||
"PRCXI_Deck"
|
|
||||||
]
|
|
||||||
})
|
|
||||||
A[1]["parent"] = "PRCXI"
|
A[1]["parent"] = "PRCXI"
|
||||||
json.dump({
|
json.dump({"nodes": A, "links": []}, f, indent=4, ensure_ascii=False)
|
||||||
"nodes": A,
|
|
||||||
"links": []
|
|
||||||
}, f, indent=4, ensure_ascii=False)
|
|
||||||
|
|
||||||
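The __main__ block prepends a synthetic "PRCXI" device node to the resource list, re-parents the deck under it, and dumps the result as a nodes/links graph. A reduced sketch of the file shape it writes; the second node is trimmed to a stub and the config is abbreviated, while the host/port/is_9320 values are the ones shown in the diff:

```python
import json

nodes = [
    {
        "id": "PRCXI",
        "name": "PRCXI",
        "parent": None,
        "type": "device",
        "class": "liquid_handler.prcxi",
        "position": {"x": 0, "y": 0, "z": 0},
        "config": {"host": "192.168.0.121", "port": 9999, "is_9320": True},  # abbreviated
        "children": ["PRCXI_Deck"],
    },
    {"id": "PRCXI_Deck", "name": "PRCXI_Deck", "parent": "PRCXI", "type": "deck"},  # stub
]

with open("deck.json", "w", encoding="utf-8") as f:
    json.dump({"nodes": nodes, "links": []}, f, indent=4, ensure_ascii=False)
```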
handler = PRCXI9300Handler(
|
handler = PRCXI9300Handler(
|
||||||
deck=deck,
|
deck=deck,
|
||||||
@@ -2637,7 +2759,6 @@ if __name__ == "__main__":
|
|||||||
time.sleep(5)
|
time.sleep(5)
|
||||||
os._exit(0)
|
os._exit(0)
|
||||||
|
|
||||||
|
|
||||||
prcxi_api = PRCXI9300Api(host="192.168.0.121", port=9999)
|
prcxi_api = PRCXI9300Api(host="192.168.0.121", port=9999)
|
||||||
prcxi_api.list_matrices()
|
prcxi_api.list_matrices()
|
||||||
prcxi_api.get_all_materials()
|
prcxi_api.get_all_materials()
|
||||||
|
|||||||
@@ -15,35 +15,35 @@ class VirtualPumpMode(Enum):
|
|||||||
|
|
||||||
class VirtualTransferPump:
|
class VirtualTransferPump:
|
||||||
"""虚拟转移泵类 - 模拟泵的基本功能,无需实际硬件 🚰"""
|
"""虚拟转移泵类 - 模拟泵的基本功能,无需实际硬件 🚰"""
|
||||||
|
|
||||||
_ros_node: BaseROS2DeviceNode
|
_ros_node: BaseROS2DeviceNode
|
||||||
|
|
||||||
def __init__(self, device_id: str = None, config: dict = None, **kwargs):
|
def __init__(self, device_id: str = None, config: dict = None, **kwargs):
|
||||||
"""
|
"""
|
||||||
初始化虚拟转移泵
|
初始化虚拟转移泵
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
device_id: 设备ID
|
device_id: 设备ID
|
||||||
config: 配置字典,包含max_volume, port等参数
|
config: 配置字典,包含max_volume, port等参数
|
||||||
**kwargs: 其他参数,确保兼容性
|
**kwargs: 其他参数,确保兼容性
|
||||||
"""
|
"""
|
||||||
self.device_id = device_id or "virtual_transfer_pump"
|
self.device_id = device_id or "virtual_transfer_pump"
|
||||||
|
|
||||||
# 从config或kwargs中获取参数,确保类型正确
|
# 从config或kwargs中获取参数,确保类型正确
|
||||||
if config:
|
if config:
|
||||||
self.max_volume = float(config.get('max_volume', 25.0))
|
self.max_volume = float(config.get("max_volume", 25.0))
|
||||||
self.port = config.get('port', 'VIRTUAL')
|
self.port = config.get("port", "VIRTUAL")
|
||||||
else:
|
else:
|
||||||
self.max_volume = float(kwargs.get('max_volume', 25.0))
|
self.max_volume = float(kwargs.get("max_volume", 25.0))
|
||||||
self.port = kwargs.get('port', 'VIRTUAL')
|
self.port = kwargs.get("port", "VIRTUAL")
|
||||||
|
|
||||||
self._transfer_rate = float(kwargs.get('transfer_rate', 0))
|
self._transfer_rate = float(kwargs.get("transfer_rate", 0))
|
||||||
self.mode = kwargs.get('mode', VirtualPumpMode.Normal)
|
self.mode = kwargs.get("mode", VirtualPumpMode.Normal)
|
||||||
|
|
||||||
# 状态变量 - 确保都是正确类型
|
# 状态变量 - 确保都是正确类型
|
||||||
self._status = "Idle"
|
self._status = "Idle"
|
||||||
self._position = 0.0 # float
|
self._position = 0.0 # float
|
||||||
self._max_velocity = 5.0 # float
|
self._max_velocity = 5.0 # float
|
||||||
self._current_volume = 0.0 # float
|
self._current_volume = 0.0 # float
|
||||||
|
|
||||||
# 🚀 新增:快速模式设置 - 大幅缩短执行时间
|
# 🚀 新增:快速模式设置 - 大幅缩短执行时间
|
||||||
@@ -52,14 +52,16 @@ class VirtualTransferPump:
|
|||||||
self._fast_dispense_time = 1.0 # 快速喷射时间(秒)
|
self._fast_dispense_time = 1.0 # 快速喷射时间(秒)
|
||||||
|
|
||||||
self.logger = logging.getLogger(f"VirtualTransferPump.{self.device_id}")
|
self.logger = logging.getLogger(f"VirtualTransferPump.{self.device_id}")
|
||||||
|
|
||||||
print(f"🚰 === 虚拟转移泵 {self.device_id} 已创建 === ✨")
|
print(f"🚰 === 虚拟转移泵 {self.device_id} 已创建 === ✨")
|
||||||
print(f"💨 快速模式: {'启用' if self._fast_mode else '禁用'} | 移动时间: {self._fast_move_time}s | 喷射时间: {self._fast_dispense_time}s")
|
print(
|
||||||
|
f"💨 快速模式: {'启用' if self._fast_mode else '禁用'} | 移动时间: {self._fast_move_time}s | 喷射时间: {self._fast_dispense_time}s"
|
||||||
|
)
|
||||||
print(f"📊 最大容量: {self.max_volume}mL | 端口: {self.port}")
|
print(f"📊 最大容量: {self.max_volume}mL | 端口: {self.port}")
|
||||||
|
|
||||||
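The constructor reads max_volume and port either from the config dict or from kwargs, coercing max_volume to float so later arithmetic is type-safe. A compact sketch of that precedence rule (the helper is illustrative; the class stores the values as attributes instead of returning a dict):

```python
from typing import Any, Dict, Optional

def pump_params(config: Optional[Dict[str, Any]] = None, **kwargs) -> Dict[str, Any]:
    """config takes precedence over kwargs; max_volume is always coerced to float."""
    source = config if config else kwargs
    return {
        "max_volume": float(source.get("max_volume", 25.0)),
        "port": source.get("port", "VIRTUAL"),
    }

print(pump_params({"max_volume": "50"}))          # {'max_volume': 50.0, 'port': 'VIRTUAL'}
print(pump_params(max_volume=30.0, port="COM3"))  # {'max_volume': 30.0, 'port': 'COM3'}
```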
def post_init(self, ros_node: BaseROS2DeviceNode):
|
def post_init(self, ros_node: BaseROS2DeviceNode):
|
||||||
self._ros_node = ros_node
|
self._ros_node = ros_node
|
||||||
|
|
||||||
async def initialize(self) -> bool:
|
async def initialize(self) -> bool:
|
||||||
"""初始化虚拟泵 🚀"""
|
"""初始化虚拟泵 🚀"""
|
||||||
self.logger.info(f"🔧 初始化虚拟转移泵 {self.device_id} ✨")
|
self.logger.info(f"🔧 初始化虚拟转移泵 {self.device_id} ✨")
|
||||||
@@ -68,33 +70,33 @@ class VirtualTransferPump:
|
|||||||
self._current_volume = 0.0
|
self._current_volume = 0.0
|
||||||
self.logger.info(f"✅ 转移泵 {self.device_id} 初始化完成 🚰")
|
self.logger.info(f"✅ 转移泵 {self.device_id} 初始化完成 🚰")
|
||||||
return True
|
return True
|
||||||
|
|
||||||
async def cleanup(self) -> bool:
|
async def cleanup(self) -> bool:
|
||||||
"""清理虚拟泵 🧹"""
|
"""清理虚拟泵 🧹"""
|
||||||
self.logger.info(f"🧹 清理虚拟转移泵 {self.device_id} 🔚")
|
self.logger.info(f"🧹 清理虚拟转移泵 {self.device_id} 🔚")
|
||||||
self._status = "Idle"
|
self._status = "Idle"
|
||||||
self.logger.info(f"✅ 转移泵 {self.device_id} 清理完成 💤")
|
self.logger.info(f"✅ 转移泵 {self.device_id} 清理完成 💤")
|
||||||
return True
|
return True
|
||||||
|
|
||||||
# 基本属性
|
# 基本属性
|
||||||
@property
|
@property
|
||||||
def status(self) -> str:
|
def status(self) -> str:
|
||||||
return self._status
|
return self._status
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def position(self) -> float:
|
def position(self) -> float:
|
||||||
"""当前柱塞位置 (ml) 📍"""
|
"""当前柱塞位置 (ml) 📍"""
|
||||||
return self._position
|
return self._position
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def current_volume(self) -> float:
|
def current_volume(self) -> float:
|
||||||
"""当前注射器中的体积 (ml) 💧"""
|
"""当前注射器中的体积 (ml) 💧"""
|
||||||
return self._current_volume
|
return self._current_volume
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def max_velocity(self) -> float:
|
def max_velocity(self) -> float:
|
||||||
return self._max_velocity
|
return self._max_velocity
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def transfer_rate(self) -> float:
|
def transfer_rate(self) -> float:
|
||||||
return self._transfer_rate
|
return self._transfer_rate
|
||||||
@@ -103,17 +105,17 @@ class VirtualTransferPump:
|
|||||||
"""设置最大速度 (ml/s) 🌊"""
|
"""设置最大速度 (ml/s) 🌊"""
|
||||||
self._max_velocity = max(0.1, min(50.0, velocity)) # 限制在合理范围内
|
self._max_velocity = max(0.1, min(50.0, velocity)) # 限制在合理范围内
|
||||||
self.logger.info(f"🌊 设置最大速度为 {self._max_velocity} mL/s")
|
self.logger.info(f"🌊 设置最大速度为 {self._max_velocity} mL/s")
|
||||||
|
|
||||||
def get_status(self) -> str:
|
def get_status(self) -> str:
|
||||||
"""获取泵状态 📋"""
|
"""获取泵状态 📋"""
|
||||||
return self._status
|
return self._status
|
||||||
|
|
||||||
async def _simulate_operation(self, duration: float):
|
async def _simulate_operation(self, duration: float):
|
||||||
"""模拟操作延时 ⏱️"""
|
"""模拟操作延时 ⏱️"""
|
||||||
self._status = "Busy"
|
self._status = "Busy"
|
||||||
await self._ros_node.sleep(duration)
|
await self._ros_node.sleep(duration)
|
||||||
self._status = "Idle"
|
self._status = "Idle"
|
||||||
|
|
||||||
def _calculate_duration(self, volume: float, velocity: float = None) -> float:
|
def _calculate_duration(self, volume: float, velocity: float = None) -> float:
|
||||||
"""
|
"""
|
||||||
计算操作持续时间 ⏰
|
计算操作持续时间 ⏰
|
||||||
@@ -121,10 +123,10 @@ class VirtualTransferPump:
|
|||||||
"""
|
"""
|
||||||
if velocity is None:
|
if velocity is None:
|
||||||
velocity = self._max_velocity
|
velocity = self._max_velocity
|
||||||
|
|
||||||
# 📊 计算理论时间(用于日志显示)
|
# 📊 计算理论时间(用于日志显示)
|
||||||
theoretical_duration = abs(volume) / velocity
|
theoretical_duration = abs(volume) / velocity
|
||||||
|
|
||||||
# 🚀 如果启用快速模式,使用固定的快速时间
|
# 🚀 如果启用快速模式,使用固定的快速时间
|
||||||
if self._fast_mode:
|
if self._fast_mode:
|
||||||
# 根据操作类型选择快速时间
|
# 根据操作类型选择快速时间
|
||||||
@@ -132,13 +134,13 @@ class VirtualTransferPump:
|
|||||||
actual_duration = self._fast_move_time
|
actual_duration = self._fast_move_time
|
||||||
else: # 很小的操作
|
else: # 很小的操作
|
||||||
actual_duration = 0.5
|
actual_duration = 0.5
|
||||||
|
|
||||||
self.logger.debug(f"⚡ 快速模式: 理论时间 {theoretical_duration:.2f}s → 实际时间 {actual_duration:.2f}s")
|
self.logger.debug(f"⚡ 快速模式: 理论时间 {theoretical_duration:.2f}s → 实际时间 {actual_duration:.2f}s")
|
||||||
return actual_duration
|
return actual_duration
|
||||||
else:
|
else:
|
||||||
# 正常模式使用理论时间
|
# 正常模式使用理论时间
|
||||||
return theoretical_duration
|
return theoretical_duration
|
||||||
|
|
||||||
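_calculate_duration always derives a theoretical time from volume and velocity, but in fast mode it swaps in a fixed short delay so simulations finish quickly, while _calculate_display_duration keeps the theoretical number for the logs. A sketch of the two paths; the 1.0 s threshold and the fast_move_time default are assumptions, since the exact branch condition sits outside the hunk shown above:

```python
def simulated_duration(volume: float, velocity: float,
                       fast_mode: bool = True,
                       fast_move_time: float = 2.0,   # assumed default
                       small_op_time: float = 0.5) -> float:
    """Sleep time used by the virtual pump: theoretical in normal mode, capped in fast mode."""
    theoretical = abs(volume) / velocity
    if not fast_mode:
        return theoretical
    # Fast mode: fixed short delay, smaller still for tiny moves (threshold assumed).
    return fast_move_time if theoretical > 1.0 else small_op_time

print(simulated_duration(20.0, 2.0))                   # 2.0  (fast mode caps a 10 s move)
print(simulated_duration(20.0, 2.0, fast_mode=False))  # 10.0
```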
def _calculate_display_duration(self, volume: float, velocity: float = None) -> float:
|
def _calculate_display_duration(self, volume: float, velocity: float = None) -> float:
|
||||||
"""
|
"""
|
||||||
计算显示用的持续时间(用于日志) 📊
|
计算显示用的持续时间(用于日志) 📊
|
||||||
@@ -147,16 +149,16 @@ class VirtualTransferPump:
|
|||||||
if velocity is None:
|
if velocity is None:
|
||||||
velocity = self._max_velocity
|
velocity = self._max_velocity
|
||||||
return abs(volume) / velocity
|
return abs(volume) / velocity
|
||||||
|
|
||||||
# 新的set_position方法 - 专门用于SetPumpPosition动作
|
# 新的set_position方法 - 专门用于SetPumpPosition动作
|
||||||
async def set_position(self, position: float, max_velocity: float = None):
|
async def set_position(self, position: float, max_velocity: float = None):
|
||||||
"""
|
"""
|
||||||
移动到绝对位置 - 专门用于SetPumpPosition动作 🎯
|
移动到绝对位置 - 专门用于SetPumpPosition动作 🎯
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
position (float): 目标位置 (ml)
|
position (float): 目标位置 (ml)
|
||||||
max_velocity (float): 移动速度 (ml/s)
|
max_velocity (float): 移动速度 (ml/s)
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
dict: 符合SetPumpPosition.action定义的结果
|
dict: 符合SetPumpPosition.action定义的结果
|
||||||
"""
|
"""
|
||||||
@@ -164,19 +166,19 @@ class VirtualTransferPump:
|
|||||||
# 验证并转换参数
|
# 验证并转换参数
|
||||||
target_position = float(position)
|
target_position = float(position)
|
||||||
velocity = float(max_velocity) if max_velocity is not None else self._max_velocity
|
velocity = float(max_velocity) if max_velocity is not None else self._max_velocity
|
||||||
|
|
||||||
# 限制位置在有效范围内
|
# 限制位置在有效范围内
|
||||||
target_position = max(0.0, min(float(self.max_volume), target_position))
|
target_position = max(0.0, min(float(self.max_volume), target_position))
|
||||||
|
|
||||||
# 计算移动距离
|
# 计算移动距离
|
||||||
volume_to_move = abs(target_position - self._position)
|
volume_to_move = abs(target_position - self._position)
|
||||||
|
|
||||||
# 📊 计算显示用的时间(用于日志)
|
# 📊 计算显示用的时间(用于日志)
|
||||||
display_duration = self._calculate_display_duration(volume_to_move, velocity)
|
display_duration = self._calculate_display_duration(volume_to_move, velocity)
|
||||||
|
|
||||||
# ⚡ 计算实际执行时间(快速模式)
|
# ⚡ 计算实际执行时间(快速模式)
|
||||||
actual_duration = self._calculate_duration(volume_to_move, velocity)
|
actual_duration = self._calculate_duration(volume_to_move, velocity)
|
||||||
|
|
||||||
# 🎯 确定操作类型和emoji
|
# 🎯 确定操作类型和emoji
|
||||||
if target_position > self._position:
|
if target_position > self._position:
|
||||||
operation_type = "吸液"
|
operation_type = "吸液"
|
||||||
@@ -187,28 +189,34 @@ class VirtualTransferPump:
|
|||||||
else:
|
else:
|
||||||
operation_type = "保持"
|
operation_type = "保持"
|
||||||
operation_emoji = "📍"
|
operation_emoji = "📍"
|
||||||
|
|
||||||
self.logger.info(f"🎯 SET_POSITION: {operation_type} {operation_emoji}")
|
self.logger.info(f"🎯 SET_POSITION: {operation_type} {operation_emoji}")
|
||||||
self.logger.info(f" 📍 位置: {self._position:.2f}mL → {target_position:.2f}mL (移动 {volume_to_move:.2f}mL)")
|
self.logger.info(
|
||||||
|
f" 📍 位置: {self._position:.2f}mL → {target_position:.2f}mL (移动 {volume_to_move:.2f}mL)"
|
||||||
|
)
|
||||||
self.logger.info(f" 🌊 速度: {velocity:.2f} mL/s")
|
self.logger.info(f" 🌊 速度: {velocity:.2f} mL/s")
|
||||||
self.logger.info(f" ⏰ 预计时间: {display_duration:.2f}s")
|
self.logger.info(f" ⏰ 预计时间: {display_duration:.2f}s")
|
||||||
|
|
||||||
if self._fast_mode:
|
if self._fast_mode:
|
||||||
self.logger.info(f" ⚡ 快速模式: 实际用时 {actual_duration:.2f}s")
|
self.logger.info(f" ⚡ 快速模式: 实际用时 {actual_duration:.2f}s")
|
||||||
|
|
||||||
# 🚀 模拟移动过程
|
# 🚀 模拟移动过程
|
||||||
if volume_to_move > 0.01: # 只有当移动距离足够大时才显示进度
|
if volume_to_move > 0.01: # 只有当移动距离足够大时才显示进度
|
||||||
start_position = self._position
|
start_position = self._position
|
||||||
steps = 5 if actual_duration > 0.5 else 2 # 根据实际时间调整步数
|
steps = 5 if actual_duration > 0.5 else 2 # 根据实际时间调整步数
|
||||||
step_duration = actual_duration / steps
|
step_duration = actual_duration / steps
|
||||||
|
|
||||||
self.logger.info(f"🚀 开始{operation_type}... {operation_emoji}")
|
self.logger.info(f"🚀 开始{operation_type}... {operation_emoji}")
|
||||||
|
|
||||||
for i in range(steps + 1):
|
for i in range(steps + 1):
|
||||||
# 计算当前位置和进度
|
# 计算当前位置和进度
|
||||||
progress = (i / steps) * 100 if steps > 0 else 100
|
progress = (i / steps) * 100 if steps > 0 else 100
|
||||||
current_pos = start_position + (target_position - start_position) * (i / steps) if steps > 0 else target_position
|
current_pos = (
|
||||||
|
start_position + (target_position - start_position) * (i / steps)
|
||||||
|
if steps > 0
|
||||||
|
else target_position
|
||||||
|
)
|
||||||
|
|
||||||
# 更新状态
|
# 更新状态
|
||||||
if i < steps:
|
if i < steps:
|
||||||
self._status = f"{operation_type}中"
|
self._status = f"{operation_type}中"
|
||||||
@@ -216,10 +224,10 @@ class VirtualTransferPump:
|
|||||||
else:
|
else:
|
||||||
self._status = "Idle"
|
self._status = "Idle"
|
||||||
status_emoji = "✅"
|
status_emoji = "✅"
|
||||||
|
|
||||||
self._position = current_pos
|
self._position = current_pos
|
||||||
self._current_volume = current_pos
|
self._current_volume = current_pos
|
||||||
|
|
||||||
# 显示进度(每25%或最后一步)
|
# 显示进度(每25%或最后一步)
|
||||||
if i == 0:
|
if i == 0:
|
||||||
self.logger.debug(f" 🔄 {operation_type}开始: {progress:.0f}%")
|
self.logger.debug(f" 🔄 {operation_type}开始: {progress:.0f}%")
|
||||||
@@ -227,7 +235,7 @@ class VirtualTransferPump:
|
|||||||
self.logger.debug(f" 🔄 {operation_type}进度: {progress:.0f}%")
|
self.logger.debug(f" 🔄 {operation_type}进度: {progress:.0f}%")
|
||||||
elif i == steps:
|
elif i == steps:
|
||||||
self.logger.info(f" ✅ {operation_type}完成: {progress:.0f}% | 当前位置: {current_pos:.2f}mL")
|
self.logger.info(f" ✅ {operation_type}完成: {progress:.0f}% | 当前位置: {current_pos:.2f}mL")
|
||||||
|
|
||||||
# 等待一小步时间
|
# 等待一小步时间
|
||||||
if i < steps and step_duration > 0:
|
if i < steps and step_duration > 0:
|
||||||
await self._ros_node.sleep(step_duration)
|
await self._ros_node.sleep(step_duration)
|
||||||
@@ -236,25 +244,27 @@ class VirtualTransferPump:
|
|||||||
self._position = target_position
|
self._position = target_position
|
||||||
self._current_volume = target_position
|
self._current_volume = target_position
|
||||||
self.logger.info(f" 📍 微调完成: {target_position:.2f}mL")
|
self.logger.info(f" 📍 微调完成: {target_position:.2f}mL")
|
||||||
|
|
||||||
# 确保最终位置准确
|
# 确保最终位置准确
|
||||||
self._position = target_position
|
self._position = target_position
|
||||||
self._current_volume = target_position
|
self._current_volume = target_position
|
||||||
self._status = "Idle"
|
self._status = "Idle"
|
||||||
|
|
||||||
# 📊 最终状态日志
|
# 📊 最终状态日志
|
||||||
if volume_to_move > 0.01:
|
if volume_to_move > 0.01:
|
||||||
self.logger.info(f"🎉 SET_POSITION 完成! 📍 最终位置: {self._position:.2f}mL | 💧 当前体积: {self._current_volume:.2f}mL")
|
self.logger.info(
|
||||||
|
f"🎉 SET_POSITION 完成! 📍 最终位置: {self._position:.2f}mL | 💧 当前体积: {self._current_volume:.2f}mL"
|
||||||
|
)
|
||||||
|
|
||||||
# 返回符合action定义的结果
|
# 返回符合action定义的结果
|
||||||
return {
|
return {
|
||||||
"success": True,
|
"success": True,
|
||||||
"message": f"✅ 成功移动到位置 {self._position:.2f}mL ({operation_type})",
|
"message": f"✅ 成功移动到位置 {self._position:.2f}mL ({operation_type})",
|
||||||
"final_position": self._position,
|
"final_position": self._position,
|
||||||
"final_volume": self._current_volume,
|
"final_volume": self._current_volume,
|
||||||
"operation_type": operation_type
|
"operation_type": operation_type,
|
||||||
}
|
}
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
error_msg = f"❌ 设置位置失败: {str(e)}"
|
error_msg = f"❌ 设置位置失败: {str(e)}"
|
||||||
self.logger.error(error_msg)
|
self.logger.error(error_msg)
|
||||||
@@ -262,134 +272,136 @@ class VirtualTransferPump:
|
|||||||
"success": False,
|
"success": False,
|
||||||
"message": error_msg,
|
"message": error_msg,
|
||||||
"final_position": self._position,
|
"final_position": self._position,
|
||||||
"final_volume": self._current_volume
|
"final_volume": self._current_volume,
|
||||||
}
|
}
|
||||||
|
|
||||||
# 其他泵操作方法
|
# 其他泵操作方法
|
||||||
async def pull_plunger(self, volume: float, velocity: float = None):
|
async def pull_plunger(self, volume: float, velocity: float = None):
|
||||||
"""
|
"""
|
||||||
拉取柱塞(吸液) 📥
|
拉取柱塞(吸液) 📥
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
volume (float): 要拉取的体积 (ml)
|
volume (float): 要拉取的体积 (ml)
|
||||||
velocity (float): 拉取速度 (ml/s)
|
velocity (float): 拉取速度 (ml/s)
|
||||||
"""
|
"""
|
||||||
new_position = min(self.max_volume, self._position + volume)
|
new_position = min(self.max_volume, self._position + volume)
|
||||||
actual_volume = new_position - self._position
|
actual_volume = new_position - self._position
|
||||||
|
|
||||||
if actual_volume <= 0:
|
if actual_volume <= 0:
|
||||||
self.logger.warning("⚠️ 无法吸液 - 已达到最大容量")
|
self.logger.warning("⚠️ 无法吸液 - 已达到最大容量")
|
||||||
return
|
return
|
||||||
|
|
||||||
display_duration = self._calculate_display_duration(actual_volume, velocity)
|
display_duration = self._calculate_display_duration(actual_volume, velocity)
|
||||||
actual_duration = self._calculate_duration(actual_volume, velocity)
|
actual_duration = self._calculate_duration(actual_volume, velocity)
|
||||||
|
|
||||||
self.logger.info(f"📥 开始吸液: {actual_volume:.2f}mL")
|
self.logger.info(f"📥 开始吸液: {actual_volume:.2f}mL")
|
||||||
self.logger.info(f" 📍 位置: {self._position:.2f}mL → {new_position:.2f}mL")
|
self.logger.info(f" 📍 位置: {self._position:.2f}mL → {new_position:.2f}mL")
|
||||||
self.logger.info(f" ⏰ 预计时间: {display_duration:.2f}s")
|
self.logger.info(f" ⏰ 预计时间: {display_duration:.2f}s")
|
||||||
|
|
||||||
if self._fast_mode:
|
if self._fast_mode:
|
||||||
self.logger.info(f" ⚡ 快速模式: 实际用时 {actual_duration:.2f}s")
|
self.logger.info(f" ⚡ 快速模式: 实际用时 {actual_duration:.2f}s")
|
||||||
|
|
||||||
await self._simulate_operation(actual_duration)
|
await self._simulate_operation(actual_duration)
|
||||||
|
|
||||||
self._position = new_position
|
self._position = new_position
|
||||||
self._current_volume = new_position
|
self._current_volume = new_position
|
||||||
|
|
||||||
self.logger.info(f"✅ 吸液完成: {actual_volume:.2f}mL | 💧 当前体积: {self._current_volume:.2f}mL")
|
self.logger.info(f"✅ 吸液完成: {actual_volume:.2f}mL | 💧 当前体积: {self._current_volume:.2f}mL")
|
||||||
|
|
||||||
async def push_plunger(self, volume: float, velocity: float = None):
|
async def push_plunger(self, volume: float, velocity: float = None):
|
||||||
"""
|
"""
|
||||||
推出柱塞(排液) 📤
|
推出柱塞(排液) 📤
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
volume (float): 要推出的体积 (ml)
|
volume (float): 要推出的体积 (ml)
|
||||||
velocity (float): 推出速度 (ml/s)
|
velocity (float): 推出速度 (ml/s)
|
||||||
"""
|
"""
|
||||||
new_position = max(0, self._position - volume)
|
new_position = max(0, self._position - volume)
|
||||||
actual_volume = self._position - new_position
|
actual_volume = self._position - new_position
|
||||||
|
|
||||||
if actual_volume <= 0:
|
if actual_volume <= 0:
|
||||||
self.logger.warning("⚠️ 无法排液 - 已达到最小容量")
|
self.logger.warning("⚠️ 无法排液 - 已达到最小容量")
|
||||||
return
|
return
|
||||||
|
|
||||||
display_duration = self._calculate_display_duration(actual_volume, velocity)
|
display_duration = self._calculate_display_duration(actual_volume, velocity)
|
||||||
actual_duration = self._calculate_duration(actual_volume, velocity)
|
actual_duration = self._calculate_duration(actual_volume, velocity)
|
||||||
|
|
||||||
self.logger.info(f"📤 开始排液: {actual_volume:.2f}mL")
|
self.logger.info(f"📤 开始排液: {actual_volume:.2f}mL")
|
||||||
self.logger.info(f" 📍 位置: {self._position:.2f}mL → {new_position:.2f}mL")
|
self.logger.info(f" 📍 位置: {self._position:.2f}mL → {new_position:.2f}mL")
|
||||||
self.logger.info(f" ⏰ 预计时间: {display_duration:.2f}s")
|
self.logger.info(f" ⏰ 预计时间: {display_duration:.2f}s")
|
||||||
|
|
||||||
if self._fast_mode:
|
if self._fast_mode:
|
||||||
self.logger.info(f" ⚡ 快速模式: 实际用时 {actual_duration:.2f}s")
|
self.logger.info(f" ⚡ 快速模式: 实际用时 {actual_duration:.2f}s")
|
||||||
|
|
||||||
await self._simulate_operation(actual_duration)
|
await self._simulate_operation(actual_duration)
|
||||||
|
|
||||||
self._position = new_position
|
self._position = new_position
|
||||||
self._current_volume = new_position
|
self._current_volume = new_position
|
||||||
|
|
||||||
self.logger.info(f"✅ 排液完成: {actual_volume:.2f}mL | 💧 当前体积: {self._current_volume:.2f}mL")
|
self.logger.info(f"✅ 排液完成: {actual_volume:.2f}mL | 💧 当前体积: {self._current_volume:.2f}mL")
|
||||||
|
|
||||||
# 便捷操作方法
|
# 便捷操作方法
|
||||||
async def aspirate(self, volume: float, velocity: float = None):
|
async def aspirate(self, volume: float, velocity: float = None):
|
||||||
"""吸液操作 📥"""
|
"""吸液操作 📥"""
|
||||||
await self.pull_plunger(volume, velocity)
|
await self.pull_plunger(volume, velocity)
|
||||||
|
|
||||||
async def dispense(self, volume: float, velocity: float = None):
|
async def dispense(self, volume: float, velocity: float = None):
|
||||||
"""排液操作 📤"""
|
"""排液操作 📤"""
|
||||||
await self.push_plunger(volume, velocity)
|
await self.push_plunger(volume, velocity)
|
||||||
|
|
||||||
async def transfer(self, volume: float, aspirate_velocity: float = None, dispense_velocity: float = None):
|
async def transfer(self, volume: float, aspirate_velocity: float = None, dispense_velocity: float = None):
|
||||||
"""转移操作(先吸后排) 🔄"""
|
"""转移操作(先吸后排) 🔄"""
|
||||||
self.logger.info(f"🔄 开始转移操作: {volume:.2f}mL")
|
self.logger.info(f"🔄 开始转移操作: {volume:.2f}mL")
|
||||||
|
|
||||||
# 吸液
|
# 吸液
|
||||||
await self.aspirate(volume, aspirate_velocity)
|
await self.aspirate(volume, aspirate_velocity)
|
||||||
|
|
||||||
# 短暂停顿
|
# 短暂停顿
|
||||||
self.logger.debug("⏸️ 短暂停顿...")
|
self.logger.debug("⏸️ 短暂停顿...")
|
||||||
await self._ros_node.sleep(0.1)
|
await self._ros_node.sleep(0.1)
|
||||||
|
|
||||||
# 排液
|
# 排液
|
||||||
await self.dispense(volume, dispense_velocity)
|
await self.dispense(volume, dispense_velocity)
|
||||||
|
|
||||||
async def empty_syringe(self, velocity: float = None):
|
async def empty_syringe(self, velocity: float = None):
|
||||||
"""清空注射器"""
|
"""清空注射器"""
|
||||||
await self.set_position(0, velocity)
|
await self.set_position(0, velocity)
|
||||||
|
|
||||||
async def fill_syringe(self, velocity: float = None):
|
async def fill_syringe(self, velocity: float = None):
|
||||||
"""充满注射器"""
|
"""充满注射器"""
|
||||||
await self.set_position(self.max_volume, velocity)
|
await self.set_position(self.max_volume, velocity)
|
||||||
|
|
||||||
async def stop_operation(self):
|
async def stop_operation(self):
|
||||||
"""停止当前操作"""
|
"""停止当前操作"""
|
||||||
self._status = "Idle"
|
self._status = "Idle"
|
||||||
self.logger.info("Operation stopped")
|
self.logger.info("Operation stopped")
|
||||||
|
|
||||||
# 状态查询方法
|
# 状态查询方法
|
||||||
def get_position(self) -> float:
|
def get_position(self) -> float:
|
||||||
"""获取当前位置"""
|
"""获取当前位置"""
|
||||||
return self._position
|
return self._position
|
||||||
|
|
||||||
def get_current_volume(self) -> float:
|
def get_current_volume(self) -> float:
|
||||||
"""获取当前体积"""
|
"""获取当前体积"""
|
||||||
return self._current_volume
|
return self._current_volume
|
||||||
|
|
||||||
def get_remaining_capacity(self) -> float:
|
def get_remaining_capacity(self) -> float:
|
||||||
"""获取剩余容量"""
|
"""获取剩余容量"""
|
||||||
return self.max_volume - self._current_volume
|
return self.max_volume - self._current_volume
|
||||||
|
|
||||||
def is_empty(self) -> bool:
|
def is_empty(self) -> bool:
|
||||||
"""检查是否为空"""
|
"""检查是否为空"""
|
||||||
return self._current_volume <= 0.01 # 允许小量误差
|
return self._current_volume <= 0.01 # 允许小量误差
|
||||||
|
|
||||||
def is_full(self) -> bool:
|
def is_full(self) -> bool:
|
||||||
"""检查是否已满"""
|
"""检查是否已满"""
|
||||||
return self._current_volume >= (self.max_volume - 0.01) # 允许小量误差
|
return self._current_volume >= (self.max_volume - 0.01) # 允许小量误差
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return f"VirtualTransferPump({self.device_id}: {self._current_volume:.2f}/{self.max_volume} ml, {self._status})"
|
return (
|
||||||
|
f"VirtualTransferPump({self.device_id}: {self._current_volume:.2f}/{self.max_volume} ml, {self._status})"
|
||||||
|
)
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return self.__str__()
|
return self.__str__()
|
||||||
|
|
||||||
@@ -398,20 +410,20 @@ class VirtualTransferPump:
|
|||||||
async def demo():
|
async def demo():
|
||||||
"""虚拟泵使用示例"""
|
"""虚拟泵使用示例"""
|
||||||
pump = VirtualTransferPump("demo_pump", {"max_volume": 50.0})
|
pump = VirtualTransferPump("demo_pump", {"max_volume": 50.0})
|
||||||
|
|
||||||
await pump.initialize()
|
await pump.initialize()
|
||||||
|
|
||||||
print(f"Initial state: {pump}")
|
print(f"Initial state: {pump}")
|
||||||
|
|
||||||
# 测试set_position方法
|
# 测试set_position方法
|
||||||
result = await pump.set_position(10.0, max_velocity=2.0)
|
result = await pump.set_position(10.0, max_velocity=2.0)
|
||||||
print(f"Set position result: {result}")
|
print(f"Set position result: {result}")
|
||||||
print(f"After setting position to 10ml: {pump}")
|
print(f"After setting position to 10ml: {pump}")
|
||||||
|
|
||||||
# 吸液测试
|
# 吸液测试
|
||||||
await pump.aspirate(5.0, velocity=2.0)
|
await pump.aspirate(5.0, velocity=2.0)
|
||||||
print(f"After aspirating 5ml: {pump}")
|
print(f"After aspirating 5ml: {pump}")
|
||||||
|
|
||||||
# 清空测试
|
# 清空测试
|
||||||
result = await pump.set_position(0.0)
|
result = await pump.set_position(0.0)
|
||||||
print(f"Empty result: {result}")
|
print(f"Empty result: {result}")
|
||||||
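demo() exercises the pump end to end, but _simulate_operation sleeps through self._ros_node, so running it standalone needs either post_init with a real node or a small stub that provides an async sleep. A hedged sketch of such a run, assuming it lives in the same module as VirtualTransferPump; the stub node is not part of the library:

```python
import asyncio

class _StubNode:
    """Minimal stand-in for BaseROS2DeviceNode: only the async sleep() the pump awaits."""
    async def sleep(self, seconds: float) -> None:
        await asyncio.sleep(min(seconds, 0.01))  # shrink delays for a quick local run

async def run_demo():
    pump = VirtualTransferPump("demo_pump", {"max_volume": 50.0})
    pump.post_init(_StubNode())          # inject the stub before any motion
    await pump.initialize()
    await pump.set_position(10.0, max_velocity=2.0)
    await pump.aspirate(5.0, velocity=2.0)
    print(pump)

# asyncio.run(run_demo())  # uncomment where VirtualTransferPump is in scope
```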
|
|||||||
742
unilabos/devices/virtual/workbench.py
Normal file
@@ -0,0 +1,742 @@
|
|||||||
|
"""
|
||||||
|
Virtual Workbench Device - 模拟工作台设备
|
||||||
|
包含:
|
||||||
|
- 1个机械臂 (每次操作3s, 独占锁)
|
||||||
|
- 3个加热台 (每次加热10s, 可并行)
|
||||||
|
|
||||||
|
工作流程:
|
||||||
|
1. A1-A5 物料同时启动,竞争机械臂
|
||||||
|
2. 机械臂将物料移动到空闲加热台
|
||||||
|
3. 加热完成后,机械臂将物料移动到C1-C5
|
||||||
|
|
||||||
|
注意:调用来自线程池,使用 threading.Lock 进行同步
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import time
|
||||||
|
from typing import Dict, Any, Optional, List
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from enum import Enum
|
||||||
|
from threading import Lock, RLock
|
||||||
|
|
||||||
|
from typing_extensions import TypedDict
|
||||||
|
|
||||||
|
from unilabos.ros.nodes.base_device_node import BaseROS2DeviceNode
|
||||||
|
from unilabos.utils.decorator import not_action
|
||||||
|
from unilabos.resources.resource_tracker import SampleUUIDsType, LabSample, RETURN_UNILABOS_SAMPLES
|
||||||
|
|
||||||
|
|
||||||
|
# ============ TypedDict 返回类型定义 ============
|
||||||
|
|
||||||
|
|
||||||
|
class MoveToHeatingStationResult(TypedDict):
|
||||||
|
"""move_to_heating_station 返回类型"""
|
||||||
|
|
||||||
|
success: bool
|
||||||
|
station_id: int
|
||||||
|
material_id: str
|
||||||
|
material_number: int
|
||||||
|
message: str
|
||||||
|
unilabos_samples: List[LabSample]
|
||||||
|
|
||||||
|
|
||||||
|
class StartHeatingResult(TypedDict):
|
||||||
|
"""start_heating 返回类型"""
|
||||||
|
|
||||||
|
success: bool
|
||||||
|
station_id: int
|
||||||
|
material_id: str
|
||||||
|
material_number: int
|
||||||
|
message: str
|
||||||
|
unilabos_samples: List[LabSample]
|
||||||
|
|
||||||
|
|
||||||
|
class MoveToOutputResult(TypedDict):
|
||||||
|
"""move_to_output 返回类型"""
|
||||||
|
|
||||||
|
success: bool
|
||||||
|
station_id: int
|
||||||
|
material_id: str
|
||||||
|
unilabos_samples: List[LabSample]
|
||||||
|
|
||||||
|
|
||||||
|
class PrepareMaterialsResult(TypedDict):
|
||||||
|
"""prepare_materials 返回类型 - 批量准备物料"""
|
||||||
|
|
||||||
|
success: bool
|
||||||
|
count: int
|
||||||
|
material_1: int # 物料编号1
|
||||||
|
material_2: int # 物料编号2
|
||||||
|
material_3: int # 物料编号3
|
||||||
|
material_4: int # 物料编号4
|
||||||
|
material_5: int # 物料编号5
|
||||||
|
message: str
|
||||||
|
unilabos_samples: List[LabSample]
|
||||||
|
|
||||||
|
|
||||||
|
# ============ 状态枚举 ============
|
||||||
|
|
||||||
|
|
||||||
|
class HeatingStationState(Enum):
|
||||||
|
"""加热台状态枚举"""
|
||||||
|
|
||||||
|
IDLE = "idle" # 空闲
|
||||||
|
OCCUPIED = "occupied" # 已放置物料,等待加热
|
||||||
|
HEATING = "heating" # 加热中
|
||||||
|
COMPLETED = "completed" # 加热完成,等待取走
|
||||||
|
|
||||||
|
|
||||||
|
class ArmState(Enum):
|
||||||
|
"""机械臂状态枚举"""
|
||||||
|
|
||||||
|
IDLE = "idle" # 空闲
|
||||||
|
BUSY = "busy" # 工作中
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class HeatingStation:
|
||||||
|
"""加热台数据结构"""
|
||||||
|
|
||||||
|
station_id: int
|
||||||
|
state: HeatingStationState = HeatingStationState.IDLE
|
||||||
|
current_material: Optional[str] = None # 当前物料 (如 "A1", "A2")
|
||||||
|
material_number: Optional[int] = None # 物料编号 (1-5)
|
||||||
|
heating_start_time: Optional[float] = None
|
||||||
|
heating_progress: float = 0.0
|
||||||
|
|
||||||
|
|
||||||
|
class VirtualWorkbench:
|
||||||
|
"""
|
||||||
|
Virtual Workbench Device - 虚拟工作台设备
|
||||||
|
|
||||||
|
模拟一个包含1个机械臂和3个加热台的工作站
|
||||||
|
- 机械臂操作耗时3秒,同一时间只能执行一个操作
|
||||||
|
- 加热台加热耗时10秒,3个加热台可并行工作
|
||||||
|
|
||||||
|
工作流:
|
||||||
|
1. 物料A1-A5并发启动(线程池),竞争机械臂使用权
|
||||||
|
2. 获取机械臂后,查找空闲加热台
|
||||||
|
3. 机械臂将物料放入加热台,开始加热
|
||||||
|
4. 加热完成后,机械臂将物料移动到目标位置Cn
|
||||||
|
"""
|
||||||
|
|
||||||
|
_ros_node: BaseROS2DeviceNode
|
||||||
|
|
||||||
|
# 配置常量
|
||||||
|
ARM_OPERATION_TIME: float = 3.0 # 机械臂操作时间(秒)
|
||||||
|
HEATING_TIME: float = 10.0 # 加热时间(秒)
|
||||||
|
NUM_HEATING_STATIONS: int = 3 # 加热台数量
|
||||||
|
|
||||||
|
def __init__(self, device_id: Optional[str] = None, config: Optional[Dict[str, Any]] = None, **kwargs):
|
||||||
|
# 处理可能的不同调用方式
|
||||||
|
if device_id is None and "id" in kwargs:
|
||||||
|
device_id = kwargs.pop("id")
|
||||||
|
if config is None and "config" in kwargs:
|
||||||
|
config = kwargs.pop("config")
|
||||||
|
|
||||||
|
self.device_id = device_id or "virtual_workbench"
|
||||||
|
self.config = config or {}
|
||||||
|
|
||||||
|
self.logger = logging.getLogger(f"VirtualWorkbench.{self.device_id}")
|
||||||
|
self.data: Dict[str, Any] = {}
|
||||||
|
|
||||||
|
# 从config中获取可配置参数
|
||||||
|
self.ARM_OPERATION_TIME = float(self.config.get("arm_operation_time", 3.0))
|
||||||
|
self.HEATING_TIME = float(self.config.get("heating_time", 10.0))
|
||||||
|
self.NUM_HEATING_STATIONS = int(self.config.get("num_heating_stations", 3))
|
||||||
|
|
||||||
|
# 机械臂状态和锁 (使用threading.Lock)
|
||||||
|
self._arm_lock = Lock()
|
||||||
|
self._arm_state = ArmState.IDLE
|
||||||
|
self._arm_current_task: Optional[str] = None
|
||||||
|
|
||||||
|
# 加热台状态 (station_id -> HeatingStation) - 立即初始化,不依赖initialize()
|
||||||
|
self._heating_stations: Dict[int, HeatingStation] = {
|
||||||
|
i: HeatingStation(station_id=i) for i in range(1, self.NUM_HEATING_STATIONS + 1)
|
||||||
|
}
|
||||||
|
self._stations_lock = RLock() # 可重入锁,保护加热台状态
|
||||||
|
|
||||||
|
# 任务追踪
|
||||||
|
self._active_tasks: Dict[str, Dict[str, Any]] = {} # material_id -> task_info
|
||||||
|
self._tasks_lock = Lock()
|
||||||
|
|
||||||
|
# 处理其他kwargs参数
|
||||||
|
skip_keys = {"arm_operation_time", "heating_time", "num_heating_stations"}
|
||||||
|
for key, value in kwargs.items():
|
||||||
|
if key not in skip_keys and not hasattr(self, key):
|
||||||
|
setattr(self, key, value)
|
||||||
|
|
||||||
|
self.logger.info(f"=== 虚拟工作台 {self.device_id} 已创建 ===")
|
||||||
|
self.logger.info(
|
||||||
|
f"机械臂操作时间: {self.ARM_OPERATION_TIME}s | "
|
||||||
|
f"加热时间: {self.HEATING_TIME}s | "
|
||||||
|
f"加热台数量: {self.NUM_HEATING_STATIONS}"
|
||||||
|
)
|
||||||
|
|
||||||
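All three timing parameters come from config with the defaults above, so a test workbench can be made much faster than the simulated hardware. A hedged usage sketch; the import path follows the new file shown here (unilabos/devices/virtual/workbench.py) and the shortened timings are arbitrary test values:

```python
from unilabos.devices.virtual.workbench import VirtualWorkbench

# Shrink the simulated timings for a quick local test.
bench = VirtualWorkbench(
    device_id="wb_test",
    config={"arm_operation_time": 0.2, "heating_time": 0.5, "num_heating_stations": 3},
)
bench.initialize()
print(bench.data["status"], len(bench.data["heating_stations"]))  # Ready 3
```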
|
@not_action
|
||||||
|
def post_init(self, ros_node: BaseROS2DeviceNode):
|
||||||
|
"""ROS节点初始化后回调"""
|
||||||
|
self._ros_node = ros_node
|
||||||
|
|
||||||
|
@not_action
|
||||||
|
def initialize(self) -> bool:
|
||||||
|
"""初始化虚拟工作台"""
|
||||||
|
self.logger.info(f"初始化虚拟工作台 {self.device_id}")
|
||||||
|
|
||||||
|
# 重置加热台状态 (已在__init__中创建,这里重置为初始状态)
|
||||||
|
with self._stations_lock:
|
||||||
|
for station in self._heating_stations.values():
|
||||||
|
station.state = HeatingStationState.IDLE
|
||||||
|
station.current_material = None
|
||||||
|
station.material_number = None
|
||||||
|
station.heating_progress = 0.0
|
||||||
|
|
||||||
|
# 初始化状态
|
||||||
|
self.data.update(
|
||||||
|
{
|
||||||
|
"status": "Ready",
|
||||||
|
"arm_state": ArmState.IDLE.value,
|
||||||
|
"arm_current_task": None,
|
||||||
|
"heating_stations": self._get_stations_status(),
|
||||||
|
"active_tasks_count": 0,
|
||||||
|
"message": "工作台就绪",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
self.logger.info(f"工作台初始化完成: {self.NUM_HEATING_STATIONS}个加热台就绪")
|
||||||
|
return True
|
||||||
|
|
||||||
|
@not_action
|
||||||
|
def cleanup(self) -> bool:
|
||||||
|
"""清理虚拟工作台"""
|
||||||
|
self.logger.info(f"清理虚拟工作台 {self.device_id}")
|
||||||
|
|
||||||
|
self._arm_state = ArmState.IDLE
|
||||||
|
self._arm_current_task = None
|
||||||
|
|
||||||
|
with self._stations_lock:
|
||||||
|
self._heating_stations.clear()
|
||||||
|
|
||||||
|
with self._tasks_lock:
|
||||||
|
self._active_tasks.clear()
|
||||||
|
|
||||||
|
self.data.update(
|
||||||
|
{
|
||||||
|
"status": "Offline",
|
||||||
|
"arm_state": ArmState.IDLE.value,
|
||||||
|
"heating_stations": {},
|
||||||
|
"message": "工作台已关闭",
|
||||||
|
}
|
||||||
|
)
|
||||||
|
return True
|
||||||
|
|
||||||
|
def _get_stations_status(self) -> Dict[int, Dict[str, Any]]:
|
||||||
|
"""获取所有加热台状态"""
|
||||||
|
with self._stations_lock:
|
||||||
|
return {
|
||||||
|
station_id: {
|
||||||
|
"state": station.state.value,
|
||||||
|
"current_material": station.current_material,
|
||||||
|
"material_number": station.material_number,
|
||||||
|
"heating_progress": station.heating_progress,
|
||||||
|
}
|
||||||
|
for station_id, station in self._heating_stations.items()
|
||||||
|
}
|
||||||
|
|
||||||
|
def _update_data_status(self, message: Optional[str] = None):
|
||||||
|
"""更新状态数据"""
|
||||||
|
self.data.update(
|
||||||
|
{
|
||||||
|
"arm_state": self._arm_state.value,
|
||||||
|
"arm_current_task": self._arm_current_task,
|
||||||
|
"heating_stations": self._get_stations_status(),
|
||||||
|
"active_tasks_count": len(self._active_tasks),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
if message:
|
||||||
|
self.data["message"] = message
|
||||||
|
|
||||||
|
def _find_available_heating_station(self) -> Optional[int]:
|
||||||
|
"""查找空闲的加热台
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
空闲加热台ID,如果没有则返回None
|
||||||
|
"""
|
||||||
|
with self._stations_lock:
|
||||||
|
for station_id, station in self._heating_stations.items():
|
||||||
|
if station.state == HeatingStationState.IDLE:
|
||||||
|
return station_id
|
||||||
|
return None
|
||||||
|
|
||||||
|
def _acquire_arm(self, task_description: str) -> bool:
|
||||||
|
"""获取机械臂使用权(阻塞直到获取)
|
||||||
|
|
||||||
|
Args:
|
||||||
|
task_description: 任务描述,用于日志
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
是否成功获取
|
||||||
|
"""
|
||||||
|
self.logger.info(f"[{task_description}] 等待获取机械臂...")
|
||||||
|
|
||||||
|
# 阻塞等待获取锁
|
||||||
|
self._arm_lock.acquire()
|
||||||
|
|
||||||
|
self._arm_state = ArmState.BUSY
|
||||||
|
self._arm_current_task = task_description
|
||||||
|
self._update_data_status(f"机械臂执行: {task_description}")
|
||||||
|
|
||||||
|
self.logger.info(f"[{task_description}] 成功获取机械臂使用权")
|
||||||
|
return True
|
||||||
|
|
||||||
|
def _release_arm(self):
|
||||||
|
"""释放机械臂"""
|
||||||
|
task = self._arm_current_task
|
||||||
|
self._arm_state = ArmState.IDLE
|
||||||
|
self._arm_current_task = None
|
||||||
|
self._arm_lock.release()
|
||||||
|
self._update_data_status(f"机械臂已释放 (完成: {task})")
|
||||||
|
self.logger.info(f"机械臂已释放 (完成: {task})")
|
||||||
|
|
||||||
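_acquire_arm blocks on a threading.Lock and _release_arm frees it; callers are expected to pair the two, and wrapping that pair in try/finally keeps the arm from staying locked when a step raises. A minimal sketch of the pairing; the context manager is a suggestion for callers, not part of the class:

```python
from contextlib import contextmanager
from threading import Lock

arm_lock = Lock()  # stands in for self._arm_lock

@contextmanager
def arm(task: str):
    """Acquire the shared arm, always releasing it even if the task fails."""
    arm_lock.acquire()
    print(f"arm acquired for {task}")
    try:
        yield
    finally:
        arm_lock.release()
        print(f"arm released after {task}")

with arm("move A1 to heating station 1"):
    pass  # the 3-second arm motion would run here
```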
|
def prepare_materials(
|
||||||
|
self,
|
||||||
|
sample_uuids: SampleUUIDsType,
|
||||||
|
count: int = 5,
|
||||||
|
) -> PrepareMaterialsResult:
|
||||||
|
"""
|
||||||
|
批量准备物料 - 虚拟起始节点
|
||||||
|
|
||||||
|
作为工作流的起始节点,生成指定数量的物料编号供后续节点使用。
|
||||||
|
输出5个handle (material_1 ~ material_5),分别对应实验1~5。
|
||||||
|
|
||||||
|
Args:
|
||||||
|
count: 待生成的物料数量,默认5 (生成 A1-A5)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
PrepareMaterialsResult: 包含 material_1 ~ material_5 用于传递给 move_to_heating_station
|
||||||
|
"""
|
||||||
|
# 生成物料列表 A1 - A{count}
|
||||||
|
materials = [i for i in range(1, count + 1)]
|
||||||
|
|
||||||
|
self.logger.info(f"[准备物料] 生成 {count} 个物料: " f"A1-A{count} -> material_1~material_{count}")
|
||||||
|
|
||||||
|
return {
|
||||||
|
"success": True,
|
||||||
|
"count": count,
|
||||||
|
"material_1": materials[0] if len(materials) > 0 else 0,
|
||||||
|
"material_2": materials[1] if len(materials) > 1 else 0,
|
||||||
|
"material_3": materials[2] if len(materials) > 2 else 0,
|
||||||
|
"material_4": materials[3] if len(materials) > 3 else 0,
|
||||||
|
"material_5": materials[4] if len(materials) > 4 else 0,
|
||||||
|
"message": f"已准备 {count} 个物料: A1-A{count}",
|
||||||
|
"unilabos_samples": [LabSample(sample_uuid=sample_uuid, oss_path="", extra={"material_uuid": content} if isinstance(content, str) else content.serialize()) for sample_uuid, content in sample_uuids.items()]
|
||||||
|
}
|
||||||
|
|
||||||
|
def move_to_heating_station(
|
||||||
|
self,
|
||||||
|
sample_uuids: SampleUUIDsType,
|
||||||
|
material_number: int,
|
||||||
|
) -> MoveToHeatingStationResult:
|
||||||
|
"""
|
||||||
|
将物料从An位置移动到加热台
|
||||||
|
|
||||||
|
多线程并发调用时,会竞争机械臂使用权,并自动查找空闲加热台
|
||||||
|
|
||||||
|
Args:
|
||||||
|
material_number: 物料编号 (1-5)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
MoveToHeatingStationResult: 包含 station_id, material_number 等用于传递给下一个节点
|
||||||
|
"""
|
||||||
|
# 根据物料编号生成物料ID
|
||||||
|
material_id = f"A{material_number}"
|
||||||
|
task_desc = f"移动{material_id}到加热台"
|
||||||
|
self.logger.info(f"[任务] {task_desc} - 开始执行")
|
||||||
|
|
||||||
|
# 记录任务
|
||||||
|
with self._tasks_lock:
|
||||||
|
self._active_tasks[material_id] = {
|
||||||
|
"status": "waiting_for_arm",
|
||||||
|
"start_time": time.time(),
|
||||||
|
}
|
||||||
|
|
||||||
|
try:
|
||||||
|
# 步骤1: 等待获取机械臂使用权(竞争)
|
||||||
|
with self._tasks_lock:
|
||||||
|
self._active_tasks[material_id]["status"] = "waiting_for_arm"
|
||||||
|
self._acquire_arm(task_desc)
|
||||||
|
|
||||||
|
# 步骤2: 查找空闲加热台
|
||||||
|
with self._tasks_lock:
|
||||||
|
self._active_tasks[material_id]["status"] = "finding_station"
|
||||||
|
station_id = None
|
||||||
|
|
||||||
|
# 循环等待直到找到空闲加热台
|
||||||
|
while station_id is None:
|
||||||
|
station_id = self._find_available_heating_station()
|
||||||
|
if station_id is None:
|
||||||
|
self.logger.info(f"[{material_id}] 没有空闲加热台,等待中...")
|
||||||
|
# 释放机械臂,等待后重试
|
||||||
|
self._release_arm()
|
||||||
|
time.sleep(0.5)
|
||||||
|
self._acquire_arm(task_desc)
|
||||||
|
|
||||||
|
# 步骤3: 占用加热台 - 立即标记为OCCUPIED,防止其他任务选择同一加热台
|
||||||
|
with self._stations_lock:
|
||||||
|
self._heating_stations[station_id].state = HeatingStationState.OCCUPIED
|
||||||
|
self._heating_stations[station_id].current_material = material_id
|
||||||
|
self._heating_stations[station_id].material_number = material_number
|
||||||
|
|
||||||
|
# 步骤4: 模拟机械臂移动操作 (3秒)
|
||||||
|
with self._tasks_lock:
|
||||||
|
self._active_tasks[material_id]["status"] = "arm_moving"
|
||||||
|
self._active_tasks[material_id]["assigned_station"] = station_id
|
||||||
|
self.logger.info(f"[{material_id}] 机械臂正在移动到加热台{station_id}...")
|
||||||
|
|
||||||
|
time.sleep(self.ARM_OPERATION_TIME)
|
||||||
|
|
||||||
|
# 步骤5: 放入加热台完成
|
||||||
|
self._update_data_status(f"{material_id}已放入加热台{station_id}")
|
||||||
|
self.logger.info(f"[{material_id}] 已放入加热台{station_id} (用时{self.ARM_OPERATION_TIME}s)")
|
||||||
|
|
||||||
|
# 释放机械臂
|
||||||
|
self._release_arm()
|
||||||
|
|
||||||
|
with self._tasks_lock:
|
||||||
|
self._active_tasks[material_id]["status"] = "placed_on_station"
|
||||||
|
|
||||||
|
return {
|
||||||
|
"success": True,
|
||||||
|
"station_id": station_id,
|
||||||
|
"material_id": material_id,
|
||||||
|
"material_number": material_number,
|
||||||
|
"message": f"{material_id}已成功移动到加热台{station_id}",
|
||||||
|
"unilabos_samples": [
|
||||||
|
LabSample(sample_uuid=sample_uuid, oss_path="", extra={"material_uuid": content} if isinstance(content, str) else content.serialize()) for
|
||||||
|
sample_uuid, content in sample_uuids.items()]
|
||||||
|
}
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
self.logger.error(f"[{material_id}] 移动失败: {str(e)}")
|
||||||
|
if self._arm_lock.locked():
|
||||||
|
self._release_arm()
|
||||||
|
return {
|
||||||
|
"success": False,
|
||||||
|
"station_id": -1,
|
||||||
|
"material_id": material_id,
|
||||||
|
"material_number": material_number,
|
||||||
|
"message": f"移动失败: {str(e)}",
|
||||||
|
"unilabos_samples": [
|
||||||
|
LabSample(sample_uuid=sample_uuid, oss_path="", extra={"material_uuid": content} if isinstance(content, str) else content.serialize()) for
|
||||||
|
sample_uuid, content in sample_uuids.items()]
|
||||||
|
}
|
||||||
|
|
||||||
|
def start_heating(
|
||||||
|
self,
|
||||||
|
sample_uuids: SampleUUIDsType,
|
||||||
|
station_id: int,
|
||||||
|
material_number: int,
|
||||||
|
) -> StartHeatingResult:
|
||||||
|
"""
|
||||||
|
启动指定加热台的加热程序
|
||||||
|
|
||||||
|
Args:
|
||||||
|
station_id: 加热台ID (1-3),从 move_to_heating_station 的 handle 传入
|
||||||
|
material_number: 物料编号,从 move_to_heating_station 的 handle 传入
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
StartHeatingResult: 包含 station_id, material_number 等用于传递给下一个节点
|
||||||
|
"""
|
||||||
|
self.logger.info(f"[加热台{station_id}] 开始加热")
|
||||||
|
|
||||||
|
if station_id not in self._heating_stations:
|
||||||
|
return {
|
||||||
|
"success": False,
|
||||||
|
"station_id": station_id,
|
||||||
|
"material_id": "",
|
||||||
|
"material_number": material_number,
|
||||||
|
"message": f"无效的加热台ID: {station_id}",
|
||||||
|
"unilabos_samples": [
|
||||||
|
LabSample(sample_uuid=sample_uuid, oss_path="", extra={"material_uuid": content} if isinstance(content, str) else content.serialize()) for
|
||||||
|
sample_uuid, content in sample_uuids.items()]
|
||||||
|
}
|
||||||
|
|
||||||
|
with self._stations_lock:
|
||||||
|
station = self._heating_stations[station_id]
|
||||||
|
|
||||||
|
if station.current_material is None:
|
||||||
|
return {
|
||||||
|
"success": False,
|
||||||
|
"station_id": station_id,
|
||||||
|
"material_id": "",
|
||||||
|
"material_number": material_number,
|
||||||
|
"message": f"加热台{station_id}上没有物料",
|
||||||
|
"unilabos_samples": [
|
||||||
|
LabSample(sample_uuid=sample_uuid, oss_path="", extra={"material_uuid": content} if isinstance(content, str) else content.serialize()) for
|
||||||
|
sample_uuid, content in sample_uuids.items()]
|
||||||
|
}
|
||||||
|
|
||||||
|
if station.state == HeatingStationState.HEATING:
|
||||||
|
return {
|
||||||
|
"success": False,
|
||||||
|
"station_id": station_id,
|
||||||
|
"material_id": station.current_material,
|
||||||
|
"material_number": material_number,
|
||||||
|
"message": f"加热台{station_id}已经在加热中",
|
||||||
|
"unilabos_samples": [
|
||||||
|
LabSample(sample_uuid=sample_uuid, oss_path="", extra={"material_uuid": content} if isinstance(content, str) else content.serialize()) for
|
||||||
|
sample_uuid, content in sample_uuids.items()]
|
||||||
|
}
|
||||||
|
|
||||||
|
material_id = station.current_material
|
||||||
|
|
||||||
|
# 开始加热
|
||||||
|
station.state = HeatingStationState.HEATING
|
||||||
|
station.heating_start_time = time.time()
|
||||||
|
station.heating_progress = 0.0
|
||||||
|
|
||||||
|
with self._tasks_lock:
|
||||||
|
if material_id in self._active_tasks:
|
||||||
|
self._active_tasks[material_id]["status"] = "heating"
|
||||||
|
|
||||||
|
self._update_data_status(f"加热台{station_id}开始加热{material_id}")
|
||||||
|
|
||||||
|
# 模拟加热过程 (10秒)
|
||||||
|
start_time = time.time()
|
||||||
|
while True:
|
||||||
|
elapsed = time.time() - start_time
|
||||||
|
progress = min(100.0, (elapsed / self.HEATING_TIME) * 100)
|
||||||
|
|
||||||
|
with self._stations_lock:
|
||||||
|
self._heating_stations[station_id].heating_progress = progress
|
||||||
|
|
||||||
|
self._update_data_status(f"加热台{station_id}加热中: {progress:.1f}%")
|
||||||
|
|
||||||
|
if elapsed >= self.HEATING_TIME:
|
||||||
|
break
|
||||||
|
|
||||||
|
time.sleep(1.0)
|
||||||
|
|
||||||
|
# 加热完成
|
||||||
|
with self._stations_lock:
|
||||||
|
self._heating_stations[station_id].state = HeatingStationState.COMPLETED
|
||||||
|
self._heating_stations[station_id].heating_progress = 100.0
|
||||||
|
|
||||||
|
with self._tasks_lock:
|
||||||
|
if material_id in self._active_tasks:
|
||||||
|
self._active_tasks[material_id]["status"] = "heating_completed"
|
||||||
|
|
||||||
|
self._update_data_status(f"加热台{station_id}加热完成")
|
||||||
|
self.logger.info(f"[加热台{station_id}] {material_id}加热完成 (用时{self.HEATING_TIME}s)")
|
||||||
|
|
||||||
|
return {
|
||||||
|
"success": True,
|
||||||
|
"station_id": station_id,
|
||||||
|
"material_id": material_id,
|
||||||
|
"material_number": material_number,
|
||||||
|
"message": f"加热台{station_id}加热完成",
|
||||||
|
"unilabos_samples": [
|
||||||
|
LabSample(sample_uuid=sample_uuid, oss_path="", extra={"material_uuid": content} if isinstance(content, str) else content.serialize()) for
|
||||||
|
sample_uuid, content in sample_uuids.items()]
|
||||||
|
}
|
||||||
|
|
||||||
|
def move_to_output(
|
||||||
|
self,
|
||||||
|
sample_uuids: SampleUUIDsType,
|
||||||
|
station_id: int,
|
||||||
|
material_number: int,
|
||||||
|
) -> MoveToOutputResult:
|
||||||
|
"""
|
||||||
|
将物料从加热台移动到输出位置Cn
|
||||||
|
|
||||||
|
Args:
|
||||||
|
station_id: 加热台ID (1-3),从 start_heating 的 handle 传入
|
||||||
|
material_number: 物料编号,从 start_heating 的 handle 传入,用于确定输出位置 Cn
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
MoveToOutputResult: 包含执行结果
|
||||||
|
"""
|
||||||
|
output_number = material_number # 物料编号决定输出位置
|
||||||
|
|
||||||
|
if station_id not in self._heating_stations:
|
||||||
|
return {
|
||||||
|
"success": False,
|
||||||
|
"station_id": station_id,
|
||||||
|
"material_id": "",
|
||||||
|
"output_position": f"C{output_number}",
|
||||||
|
"message": f"无效的加热台ID: {station_id}",
|
||||||
|
"unilabos_samples": [
|
||||||
|
LabSample(sample_uuid=sample_uuid, oss_path="", extra={"material_uuid": content} if isinstance(content, str) else content.serialize()) for
|
||||||
|
sample_uuid, content in sample_uuids.items()]
|
||||||
|
}
|
||||||
|
|
||||||
|
with self._stations_lock:
|
||||||
|
station = self._heating_stations[station_id]
|
||||||
|
material_id = station.current_material
|
||||||
|
|
||||||
|
if material_id is None:
|
||||||
|
return {
|
||||||
|
"success": False,
|
||||||
|
"station_id": station_id,
|
||||||
|
"material_id": "",
|
||||||
|
"output_position": f"C{output_number}",
|
||||||
|
"message": f"加热台{station_id}上没有物料",
|
||||||
|
"unilabos_samples": [
|
||||||
|
LabSample(sample_uuid=sample_uuid, oss_path="", extra={"material_uuid": content} if isinstance(content, str) else content.serialize()) for
|
||||||
|
sample_uuid, content in sample_uuids.items()]
|
||||||
|
}
|
||||||
|
|
||||||
|
if station.state != HeatingStationState.COMPLETED:
|
||||||
|
return {
|
||||||
|
"success": False,
|
||||||
|
"station_id": station_id,
|
||||||
|
"material_id": material_id,
|
||||||
|
"output_position": f"C{output_number}",
|
||||||
|
"message": f"加热台{station_id}尚未完成加热 (当前状态: {station.state.value})",
|
||||||
|
"unilabos_samples": [
|
||||||
|
LabSample(sample_uuid=sample_uuid, oss_path="", extra={"material_uuid": content} if isinstance(content, str) else content.serialize()) for
|
||||||
|
sample_uuid, content in sample_uuids.items()]
|
||||||
|
}
|
||||||
|
|
||||||
|
output_position = f"C{output_number}"
|
||||||
|
task_desc = f"从加热台{station_id}移动{material_id}到{output_position}"
|
||||||
|
self.logger.info(f"[任务] {task_desc}")
|
||||||
|
|
||||||
|
try:
|
||||||
|
with self._tasks_lock:
|
||||||
|
if material_id in self._active_tasks:
|
||||||
|
self._active_tasks[material_id]["status"] = "waiting_for_arm_output"
|
||||||
|
|
||||||
|
# 获取机械臂
|
||||||
|
self._acquire_arm(task_desc)
|
||||||
|
|
||||||
|
with self._tasks_lock:
|
||||||
|
if material_id in self._active_tasks:
|
||||||
|
self._active_tasks[material_id]["status"] = "arm_moving_to_output"
|
||||||
|
|
||||||
|
# 模拟机械臂操作 (3秒)
|
||||||
|
self.logger.info(f"[{material_id}] 机械臂正在从加热台{station_id}取出并移动到{output_position}...")
|
||||||
|
time.sleep(self.ARM_OPERATION_TIME)
|
||||||
|
|
||||||
|
# 清空加热台
|
||||||
|
with self._stations_lock:
|
||||||
|
self._heating_stations[station_id].state = HeatingStationState.IDLE
|
||||||
|
self._heating_stations[station_id].current_material = None
|
||||||
|
self._heating_stations[station_id].material_number = None
|
||||||
|
self._heating_stations[station_id].heating_progress = 0.0
|
||||||
|
self._heating_stations[station_id].heating_start_time = None
|
||||||
|
|
||||||
|
# 释放机械臂
|
||||||
|
self._release_arm()
|
||||||
|
|
||||||
|
# 任务完成
|
||||||
|
with self._tasks_lock:
|
||||||
|
if material_id in self._active_tasks:
|
||||||
|
self._active_tasks[material_id]["status"] = "completed"
|
||||||
|
self._active_tasks[material_id]["end_time"] = time.time()
|
||||||
|
|
||||||
|
self._update_data_status(f"{material_id}已移动到{output_position}")
|
||||||
|
self.logger.info(f"[{material_id}] 已成功移动到{output_position} (用时{self.ARM_OPERATION_TIME}s)")
|
||||||
|
|
||||||
|
return {
|
||||||
|
"success": True,
|
||||||
|
"station_id": station_id,
|
||||||
|
"material_id": material_id,
|
||||||
|
"output_position": output_position,
|
||||||
|
"message": f"{material_id}已成功移动到{output_position}",
|
||||||
|
"unilabos_samples": [
|
||||||
|
LabSample(sample_uuid=sample_uuid, oss_path="", extra={"material_uuid": content} if isinstance(content, str) else content.serialize()) for
|
||||||
|
sample_uuid, content in sample_uuids.items()]
|
||||||
|
}
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
self.logger.error(f"移动到输出位置失败: {str(e)}")
|
||||||
|
if self._arm_lock.locked():
|
||||||
|
self._release_arm()
|
||||||
|
return {
|
||||||
|
"success": False,
|
||||||
|
"station_id": station_id,
|
||||||
|
"material_id": "",
|
||||||
|
"output_position": output_position,
|
||||||
|
"message": f"移动失败: {str(e)}",
|
||||||
|
"unilabos_samples": [
|
||||||
|
LabSample(sample_uuid=sample_uuid, oss_path="", extra={"material_uuid": content} if isinstance(content, str) else content.serialize()) for
|
||||||
|
sample_uuid, content in sample_uuids.items()]
|
||||||
|
}
|
||||||
|
|
||||||
|
# ============ 状态属性 ============
|
||||||
|
|
||||||
|
@property
|
||||||
|
def status(self) -> str:
|
||||||
|
return self.data.get("status", "Unknown")
|
||||||
|
|
||||||
|
@property
|
||||||
|
def arm_state(self) -> str:
|
||||||
|
return self._arm_state.value
|
||||||
|
|
||||||
|
@property
|
||||||
|
def arm_current_task(self) -> str:
|
||||||
|
return self._arm_current_task or ""
|
||||||
|
|
||||||
|
@property
|
||||||
|
def heating_station_1_state(self) -> str:
|
||||||
|
with self._stations_lock:
|
||||||
|
station = self._heating_stations.get(1)
|
||||||
|
return station.state.value if station else "unknown"
|
||||||
|
|
||||||
|
@property
|
||||||
|
def heating_station_1_material(self) -> str:
|
||||||
|
with self._stations_lock:
|
||||||
|
station = self._heating_stations.get(1)
|
||||||
|
return station.current_material or "" if station else ""
|
||||||
|
|
||||||
|
@property
|
||||||
|
def heating_station_1_progress(self) -> float:
|
||||||
|
with self._stations_lock:
|
||||||
|
station = self._heating_stations.get(1)
|
||||||
|
return station.heating_progress if station else 0.0
|
||||||
|
|
||||||
|
@property
|
||||||
|
def heating_station_2_state(self) -> str:
|
||||||
|
with self._stations_lock:
|
||||||
|
station = self._heating_stations.get(2)
|
||||||
|
return station.state.value if station else "unknown"
|
||||||
|
|
||||||
|
@property
|
||||||
|
def heating_station_2_material(self) -> str:
|
||||||
|
with self._stations_lock:
|
||||||
|
station = self._heating_stations.get(2)
|
||||||
|
return station.current_material or "" if station else ""
|
||||||
|
|
||||||
|
@property
|
||||||
|
def heating_station_2_progress(self) -> float:
|
||||||
|
with self._stations_lock:
|
||||||
|
station = self._heating_stations.get(2)
|
||||||
|
return station.heating_progress if station else 0.0
|
||||||
|
|
||||||
|
@property
|
||||||
|
def heating_station_3_state(self) -> str:
|
||||||
|
with self._stations_lock:
|
||||||
|
station = self._heating_stations.get(3)
|
||||||
|
return station.state.value if station else "unknown"
|
||||||
|
|
||||||
|
@property
|
||||||
|
def heating_station_3_material(self) -> str:
|
||||||
|
with self._stations_lock:
|
||||||
|
station = self._heating_stations.get(3)
|
||||||
|
return station.current_material or "" if station else ""
|
||||||
|
|
||||||
|
@property
|
||||||
|
def heating_station_3_progress(self) -> float:
|
||||||
|
with self._stations_lock:
|
||||||
|
station = self._heating_stations.get(3)
|
||||||
|
return station.heating_progress if station else 0.0
|
||||||
|
|
||||||
|
@property
|
||||||
|
def active_tasks_count(self) -> int:
|
||||||
|
with self._tasks_lock:
|
||||||
|
return len(self._active_tasks)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def message(self) -> str:
|
||||||
|
return self.data.get("message", "")
|
||||||
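A minimal sketch of how the concurrency model above could be exercised: five materials compete for one arm (a plain `threading.Lock`) and three heating stations, each worker running the move → heat → output chain. The `VirtualWorkbench` class name comes from the registry entry later in this diff; the constructor arguments, the empty `sample_uuids` dict, and the assumption that initialization has already run are all hypothetical.

```python
# Sketch only: driving the virtual workbench's actions concurrently.
# VirtualWorkbench(device_id=...) and the empty sample_uuids dict are assumptions.
from concurrent.futures import ThreadPoolExecutor

def run_one(bench, material_number):
    # Full path for a single material: place on a heating station, heat, move to Cn.
    moved = bench.move_to_heating_station({}, material_number)
    if not moved["success"]:
        return moved
    heated = bench.start_heating({}, moved["station_id"], moved["material_number"])
    if not heated["success"]:
        return heated
    return bench.move_to_output({}, heated["station_id"], heated["material_number"])

bench = VirtualWorkbench(device_id="workbench_1")  # hypothetical instantiation
prepared = bench.prepare_materials({}, count=5)    # emits material_1..material_5 handles

# Five workers contend for 1 arm and 3 stations; the class's own locks serialize access.
with ThreadPoolExecutor(max_workers=5) as pool:
    results = list(pool.map(lambda n: run_one(bench, n), range(1, 6)))
```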
 0  unilabos/devices/xrd_d7mate/__init__.py  (new file)
 0  unilabos/devices/zhida_hplc/__init__.py  (new file)
@@ -638,7 +638,7 @@ liquid_handler:
 placeholder_keys: {}
 result: {}
 schema:
-description: 吸头迭代函数。用于自动管理和切换吸头架中的吸头,实现批量实验中的吸头自动分配和追踪。该函数监控吸头使用状态,自动切换到下一个可用吸头位置,确保实验流程的连续性。适用于高通量实验、批量处理、自动化流水线等需要大量吸头管理的应用场景。
+description: 吸头迭代函数。用于自动管理和切换枪头盒中的吸头,实现批量实验中的吸头自动分配和追踪。该函数监控吸头使用状态,自动切换到下一个可用吸头位置,确保实验流程的连续性。适用于高通量实验、批量处理、自动化流水线等需要大量吸头管理的应用场景。
 properties:
 feedback: {}
 goal:
@@ -712,6 +712,43 @@ liquid_handler:
 title: set_group参数
 type: object
 type: UniLabJsonCommand
+auto-set_liquid_from_plate:
+feedback: {}
+goal: {}
+goal_default:
+liquid_names: null
+plate: null
+volumes: null
+well_names: null
+handles: {}
+placeholder_keys: {}
+result: {}
+schema:
+description: ''
+properties:
+feedback: {}
+goal:
+properties:
+liquid_names:
+type: string
+plate:
+type: string
+volumes:
+type: string
+well_names:
+type: string
+required:
+- plate
+- well_names
+- liquid_names
+- volumes
+type: object
+result: {}
+required:
+- goal
+title: set_liquid_from_plate参数
+type: object
+type: UniLabJsonCommand
 auto-set_tiprack:
 feedback: {}
 goal: {}
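For orientation, a goal payload matching the `auto-set_liquid_from_plate` schema just added above would look roughly like the sketch below: all four fields are declared as plain strings and all are required. The concrete values and the way multiple wells or volumes would be encoded in a single string are assumptions, not taken from the source.

```python
# Illustrative only: a goal dict shaped to the auto-set_liquid_from_plate schema.
goal = {
    "plate": "plate_carrier_1",   # assumed plate resource identifier
    "well_names": "A1",
    "liquid_names": "water",
    "volumes": "100",
}
```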
@@ -721,7 +758,7 @@ liquid_handler:
 placeholder_keys: {}
 result: {}
 schema:
-description: 吸头架设置函数。用于配置和初始化液体处理系统的吸头架信息,包括吸头架位置、类型、容量等参数。该函数建立吸头资源管理系统,为后续的吸头选择和使用提供基础配置。适用于系统初始化、吸头架更换、实验配置等需要吸头资源管理的操作场景。
+description: 枪头盒设置函数。用于配置和初始化液体处理系统的枪头盒信息,包括枪头盒位置、类型、容量等参数。该函数建立吸头资源管理系统,为后续的吸头选择和使用提供基础配置。适用于系统初始化、枪头盒更换、实验配置等需要吸头资源管理的操作场景。
 properties:
 feedback: {}
 goal:
@@ -4019,8 +4056,7 @@ liquid_handler:
 mix_liquid_height: 0.0
 mix_rate: 0
 mix_stage: ''
-mix_times:
-- 0
+mix_times: 0
 mix_vol: 0
 none_keys:
 - ''
@@ -4094,32 +4130,43 @@ liquid_handler:
|
|||||||
- 0
|
- 0
|
||||||
handles:
|
handles:
|
||||||
input:
|
input:
|
||||||
- data_key: liquid
|
- data_key: sources
|
||||||
data_source: handle
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: sources
|
handler_key: sources
|
||||||
label: sources
|
label: sources
|
||||||
- data_key: liquid
|
- data_key: targets
|
||||||
data_source: executor
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: targets
|
handler_key: targets
|
||||||
label: targets
|
label: targets
|
||||||
- data_key: liquid
|
- data_key: tip_racks
|
||||||
data_source: executor
|
data_source: handle
|
||||||
|
data_type: resource
|
||||||
|
handler_key: tip_racks
|
||||||
|
label: tip_racks
|
||||||
|
output:
|
||||||
|
- data_key: sources
|
||||||
|
data_source: handle
|
||||||
|
data_type: resource
|
||||||
|
handler_key: targets
|
||||||
|
label: 转移目标
|
||||||
|
- data_key: tip_racks
|
||||||
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: tip_rack
|
handler_key: tip_rack
|
||||||
label: tip_rack
|
label: 枪头盒
|
||||||
output:
|
output:
|
||||||
- data_key: liquid
|
- data_key: sources.@flatten
|
||||||
data_source: handle
|
data_source: executor
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: sources_out
|
handler_key: sources_out
|
||||||
label: sources
|
label: sources
|
||||||
- data_key: liquid
|
- data_key: targets
|
||||||
data_source: executor
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: targets_out
|
handler_key: targets_out
|
||||||
label: targets
|
label: 移液后目标孔
|
||||||
placeholder_keys:
|
placeholder_keys:
|
||||||
sources: unilabos_resources
|
sources: unilabos_resources
|
||||||
targets: unilabos_resources
|
targets: unilabos_resources
|
||||||
@@ -4176,11 +4223,9 @@ liquid_handler:
 mix_stage:
 type: string
 mix_times:
-items:
-maximum: 2147483647
-minimum: -2147483648
-type: integer
-type: array
+maximum: 2147483647
+minimum: -2147483648
+type: integer
 mix_vol:
 maximum: 2147483647
 minimum: -2147483648
@@ -4767,13 +4812,13 @@ liquid_handler.biomek:
|
|||||||
targets: ''
|
targets: ''
|
||||||
handles:
|
handles:
|
||||||
input:
|
input:
|
||||||
- data_key: liquid
|
- data_key: sources
|
||||||
data_source: handle
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: sources
|
handler_key: sources
|
||||||
label: sources
|
label: sources
|
||||||
output:
|
output:
|
||||||
- data_key: liquid
|
- data_key: targets
|
||||||
data_source: handle
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: targets
|
handler_key: targets
|
||||||
@@ -4926,29 +4971,29 @@ liquid_handler.biomek:
|
|||||||
volume: 0.0
|
volume: 0.0
|
||||||
handles:
|
handles:
|
||||||
input:
|
input:
|
||||||
- data_key: liquid
|
- data_key: sources
|
||||||
data_source: handle
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: sources
|
handler_key: sources
|
||||||
label: sources
|
label: sources
|
||||||
- data_key: liquid
|
- data_key: targets
|
||||||
data_source: executor
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: targets
|
handler_key: targets
|
||||||
label: targets
|
label: targets
|
||||||
- data_key: liquid
|
- data_key: tip_racks
|
||||||
data_source: executor
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: tip_rack
|
handler_key: tip_racks
|
||||||
label: tip_rack
|
label: tip_racks
|
||||||
output:
|
output:
|
||||||
- data_key: liquid
|
- data_key: sources
|
||||||
data_source: handle
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: sources_out
|
handler_key: sources_out
|
||||||
label: sources
|
label: sources
|
||||||
- data_key: liquid
|
- data_key: targets
|
||||||
data_source: executor
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: targets_out
|
handler_key: targets_out
|
||||||
label: targets
|
label: targets
|
||||||
@@ -5043,8 +5088,7 @@ liquid_handler.biomek:
|
|||||||
mix_liquid_height: 0.0
|
mix_liquid_height: 0.0
|
||||||
mix_rate: 0
|
mix_rate: 0
|
||||||
mix_stage: ''
|
mix_stage: ''
|
||||||
mix_times:
|
mix_times: 0
|
||||||
- 0
|
|
||||||
mix_vol: 0
|
mix_vol: 0
|
||||||
none_keys:
|
none_keys:
|
||||||
- ''
|
- ''
|
||||||
@@ -5118,19 +5162,32 @@ liquid_handler.biomek:
|
|||||||
- 0
|
- 0
|
||||||
handles:
|
handles:
|
||||||
input:
|
input:
|
||||||
- data_key: liquid
|
- data_key: sources
|
||||||
data_source: handle
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: liquid-input
|
handler_key: sources
|
||||||
io_type: target
|
label: sources
|
||||||
label: Liquid Input
|
- data_key: targets
|
||||||
output:
|
data_source: handle
|
||||||
- data_key: liquid
|
|
||||||
data_source: executor
|
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: liquid-output
|
handler_key: targets
|
||||||
io_type: source
|
label: targets
|
||||||
label: Liquid Output
|
- data_key: tip_racks
|
||||||
|
data_source: handle
|
||||||
|
data_type: resource
|
||||||
|
handler_key: tip_racks
|
||||||
|
label: tip_racks
|
||||||
|
output:
|
||||||
|
- data_key: sources
|
||||||
|
data_source: handle
|
||||||
|
data_type: resource
|
||||||
|
handler_key: sources_out
|
||||||
|
label: sources
|
||||||
|
- data_key: targets
|
||||||
|
data_source: handle
|
||||||
|
data_type: resource
|
||||||
|
handler_key: targets_out
|
||||||
|
label: targets
|
||||||
placeholder_keys:
|
placeholder_keys:
|
||||||
sources: unilabos_resources
|
sources: unilabos_resources
|
||||||
targets: unilabos_resources
|
targets: unilabos_resources
|
||||||
@@ -5187,11 +5244,9 @@ liquid_handler.biomek:
|
|||||||
mix_stage:
|
mix_stage:
|
||||||
type: string
|
type: string
|
||||||
mix_times:
|
mix_times:
|
||||||
items:
|
maximum: 2147483647
|
||||||
maximum: 2147483647
|
minimum: -2147483648
|
||||||
minimum: -2147483648
|
type: integer
|
||||||
type: integer
|
|
||||||
type: array
|
|
||||||
mix_vol:
|
mix_vol:
|
||||||
maximum: 2147483647
|
maximum: 2147483647
|
||||||
minimum: -2147483648
|
minimum: -2147483648
|
||||||
@@ -7610,6 +7665,43 @@ liquid_handler.prcxi:
|
|||||||
title: iter_tips参数
|
title: iter_tips参数
|
||||||
type: object
|
type: object
|
||||||
type: UniLabJsonCommand
|
type: UniLabJsonCommand
|
||||||
|
auto-magnetic_action:
|
||||||
|
feedback: {}
|
||||||
|
goal: {}
|
||||||
|
goal_default:
|
||||||
|
height: null
|
||||||
|
is_wait: null
|
||||||
|
module_no: null
|
||||||
|
time: null
|
||||||
|
handles: {}
|
||||||
|
placeholder_keys: {}
|
||||||
|
result: {}
|
||||||
|
schema:
|
||||||
|
description: ''
|
||||||
|
properties:
|
||||||
|
feedback: {}
|
||||||
|
goal:
|
||||||
|
properties:
|
||||||
|
height:
|
||||||
|
type: integer
|
||||||
|
is_wait:
|
||||||
|
type: boolean
|
||||||
|
module_no:
|
||||||
|
type: integer
|
||||||
|
time:
|
||||||
|
type: integer
|
||||||
|
required:
|
||||||
|
- time
|
||||||
|
- module_no
|
||||||
|
- height
|
||||||
|
- is_wait
|
||||||
|
type: object
|
||||||
|
result: {}
|
||||||
|
required:
|
||||||
|
- goal
|
||||||
|
title: magnetic_action参数
|
||||||
|
type: object
|
||||||
|
type: UniLabJsonCommandAsync
|
||||||
auto-move_to:
|
auto-move_to:
|
||||||
feedback: {}
|
feedback: {}
|
||||||
goal: {}
|
goal: {}
|
||||||
@@ -7643,6 +7735,31 @@ liquid_handler.prcxi:
|
|||||||
title: move_to参数
|
title: move_to参数
|
||||||
type: object
|
type: object
|
||||||
type: UniLabJsonCommandAsync
|
type: UniLabJsonCommandAsync
|
||||||
|
auto-plr_pos_to_prcxi:
|
||||||
|
feedback: {}
|
||||||
|
goal: {}
|
||||||
|
goal_default:
|
||||||
|
resource: null
|
||||||
|
handles: {}
|
||||||
|
placeholder_keys: {}
|
||||||
|
result: {}
|
||||||
|
schema:
|
||||||
|
description: ''
|
||||||
|
properties:
|
||||||
|
feedback: {}
|
||||||
|
goal:
|
||||||
|
properties:
|
||||||
|
resource:
|
||||||
|
type: object
|
||||||
|
required:
|
||||||
|
- resource
|
||||||
|
type: object
|
||||||
|
result: {}
|
||||||
|
required:
|
||||||
|
- goal
|
||||||
|
title: plr_pos_to_prcxi参数
|
||||||
|
type: object
|
||||||
|
type: UniLabJsonCommand
|
||||||
auto-post_init:
|
auto-post_init:
|
||||||
feedback: {}
|
feedback: {}
|
||||||
goal: {}
|
goal: {}
|
||||||
@@ -7763,6 +7880,47 @@ liquid_handler.prcxi:
|
|||||||
title: shaker_action参数
|
title: shaker_action参数
|
||||||
type: object
|
type: object
|
||||||
type: UniLabJsonCommandAsync
|
type: UniLabJsonCommandAsync
|
||||||
|
auto-shaking_incubation_action:
|
||||||
|
feedback: {}
|
||||||
|
goal: {}
|
||||||
|
goal_default:
|
||||||
|
amplitude: null
|
||||||
|
is_wait: null
|
||||||
|
module_no: null
|
||||||
|
temperature: null
|
||||||
|
time: null
|
||||||
|
handles: {}
|
||||||
|
placeholder_keys: {}
|
||||||
|
result: {}
|
||||||
|
schema:
|
||||||
|
description: ''
|
||||||
|
properties:
|
||||||
|
feedback: {}
|
||||||
|
goal:
|
||||||
|
properties:
|
||||||
|
amplitude:
|
||||||
|
type: integer
|
||||||
|
is_wait:
|
||||||
|
type: boolean
|
||||||
|
module_no:
|
||||||
|
type: integer
|
||||||
|
temperature:
|
||||||
|
type: integer
|
||||||
|
time:
|
||||||
|
type: integer
|
||||||
|
required:
|
||||||
|
- time
|
||||||
|
- module_no
|
||||||
|
- amplitude
|
||||||
|
- is_wait
|
||||||
|
- temperature
|
||||||
|
type: object
|
||||||
|
result: {}
|
||||||
|
required:
|
||||||
|
- goal
|
||||||
|
title: shaking_incubation_action参数
|
||||||
|
type: object
|
||||||
|
type: UniLabJsonCommandAsync
|
||||||
auto-touch_tip:
|
auto-touch_tip:
|
||||||
feedback: {}
|
feedback: {}
|
||||||
goal: {}
|
goal: {}
|
||||||
@@ -8497,7 +8655,19 @@ liquid_handler.prcxi:
|
|||||||
z: 0.0
|
z: 0.0
|
||||||
sample_id: ''
|
sample_id: ''
|
||||||
type: ''
|
type: ''
|
||||||
handles: {}
|
handles:
|
||||||
|
input:
|
||||||
|
- data_key: plate
|
||||||
|
data_source: handle
|
||||||
|
data_type: resource
|
||||||
|
handler_key: plate
|
||||||
|
label: plate
|
||||||
|
output:
|
||||||
|
- data_key: plate
|
||||||
|
data_source: handle
|
||||||
|
data_type: resource
|
||||||
|
handler_key: plate
|
||||||
|
label: plate
|
||||||
placeholder_keys:
|
placeholder_keys:
|
||||||
plate: unilabos_resources
|
plate: unilabos_resources
|
||||||
to: unilabos_resources
|
to: unilabos_resources
|
||||||
@@ -9290,7 +9460,13 @@ liquid_handler.prcxi:
|
|||||||
data_source: handle
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: input_wells
|
handler_key: input_wells
|
||||||
label: InputWells
|
label: 待设定液体孔
|
||||||
|
output:
|
||||||
|
- data_key: wells.@flatten
|
||||||
|
data_source: executor
|
||||||
|
data_type: resource
|
||||||
|
handler_key: output_wells
|
||||||
|
label: 已设定液体孔
|
||||||
placeholder_keys:
|
placeholder_keys:
|
||||||
wells: unilabos_resources
|
wells: unilabos_resources
|
||||||
result: {}
|
result: {}
|
||||||
@@ -9406,6 +9582,352 @@ liquid_handler.prcxi:
|
|||||||
title: LiquidHandlerSetLiquid
|
title: LiquidHandlerSetLiquid
|
||||||
type: object
|
type: object
|
||||||
type: LiquidHandlerSetLiquid
|
type: LiquidHandlerSetLiquid
|
||||||
|
set_liquid_from_plate:
|
||||||
|
feedback: {}
|
||||||
|
goal: {}
|
||||||
|
goal_default:
|
||||||
|
liquid_names: null
|
||||||
|
plate: null
|
||||||
|
volumes: null
|
||||||
|
well_names: null
|
||||||
|
handles:
|
||||||
|
input:
|
||||||
|
- data_key: '@this.0@@@plate'
|
||||||
|
data_source: handle
|
||||||
|
data_type: resource
|
||||||
|
handler_key: input_plate
|
||||||
|
label: 待设定液体板
|
||||||
|
output:
|
||||||
|
- data_key: plate.@flatten
|
||||||
|
data_source: executor
|
||||||
|
data_type: resource
|
||||||
|
handler_key: output_plate
|
||||||
|
label: 已设定液体板
|
||||||
|
- data_key: wells.@flatten
|
||||||
|
data_source: executor
|
||||||
|
data_type: resource
|
||||||
|
handler_key: output_wells
|
||||||
|
label: 已设定液体孔
|
||||||
|
- data_key: volumes
|
||||||
|
data_source: executor
|
||||||
|
data_type: number_array
|
||||||
|
handler_key: output_volumes
|
||||||
|
label: 各孔设定体积
|
||||||
|
placeholder_keys:
|
||||||
|
plate: unilabos_resources
|
||||||
|
result: {}
|
||||||
|
schema:
|
||||||
|
description: ''
|
||||||
|
properties:
|
||||||
|
feedback: {}
|
||||||
|
goal:
|
||||||
|
properties:
|
||||||
|
liquid_names:
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
type: array
|
||||||
|
plate:
|
||||||
|
properties:
|
||||||
|
category:
|
||||||
|
type: string
|
||||||
|
children:
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
type: array
|
||||||
|
config:
|
||||||
|
type: string
|
||||||
|
data:
|
||||||
|
type: string
|
||||||
|
id:
|
||||||
|
type: string
|
||||||
|
name:
|
||||||
|
type: string
|
||||||
|
parent:
|
||||||
|
type: string
|
||||||
|
pose:
|
||||||
|
properties:
|
||||||
|
orientation:
|
||||||
|
properties:
|
||||||
|
w:
|
||||||
|
type: number
|
||||||
|
x:
|
||||||
|
type: number
|
||||||
|
y:
|
||||||
|
type: number
|
||||||
|
z:
|
||||||
|
type: number
|
||||||
|
required:
|
||||||
|
- x
|
||||||
|
- y
|
||||||
|
- z
|
||||||
|
- w
|
||||||
|
title: orientation
|
||||||
|
type: object
|
||||||
|
position:
|
||||||
|
properties:
|
||||||
|
x:
|
||||||
|
type: number
|
||||||
|
y:
|
||||||
|
type: number
|
||||||
|
z:
|
||||||
|
type: number
|
||||||
|
required:
|
||||||
|
- x
|
||||||
|
- y
|
||||||
|
- z
|
||||||
|
title: position
|
||||||
|
type: object
|
||||||
|
required:
|
||||||
|
- position
|
||||||
|
- orientation
|
||||||
|
title: pose
|
||||||
|
type: object
|
||||||
|
sample_id:
|
||||||
|
type: string
|
||||||
|
type:
|
||||||
|
type: string
|
||||||
|
required:
|
||||||
|
- id
|
||||||
|
- name
|
||||||
|
- sample_id
|
||||||
|
- children
|
||||||
|
- parent
|
||||||
|
- type
|
||||||
|
- category
|
||||||
|
- pose
|
||||||
|
- config
|
||||||
|
- data
|
||||||
|
title: plate
|
||||||
|
type: object
|
||||||
|
volumes:
|
||||||
|
items:
|
||||||
|
type: number
|
||||||
|
type: array
|
||||||
|
well_names:
|
||||||
|
items:
|
||||||
|
type: string
|
||||||
|
type: array
|
||||||
|
required:
|
||||||
|
- plate
|
||||||
|
- well_names
|
||||||
|
- liquid_names
|
||||||
|
- volumes
|
||||||
|
type: object
|
||||||
|
result:
|
||||||
|
$defs:
|
||||||
|
ResourceDict:
|
||||||
|
properties:
|
||||||
|
class:
|
||||||
|
description: Resource class name
|
||||||
|
title: Class
|
||||||
|
type: string
|
||||||
|
config:
|
||||||
|
additionalProperties: true
|
||||||
|
description: Resource configuration
|
||||||
|
title: Config
|
||||||
|
type: object
|
||||||
|
data:
|
||||||
|
additionalProperties: true
|
||||||
|
description: 'Resource data, eg: container liquid data'
|
||||||
|
title: Data
|
||||||
|
type: object
|
||||||
|
description:
|
||||||
|
default: ''
|
||||||
|
description: Resource description
|
||||||
|
title: Description
|
||||||
|
type: string
|
||||||
|
extra:
|
||||||
|
additionalProperties: true
|
||||||
|
description: 'Extra data, eg: slot index'
|
||||||
|
title: Extra
|
||||||
|
type: object
|
||||||
|
icon:
|
||||||
|
default: ''
|
||||||
|
description: Resource icon
|
||||||
|
title: Icon
|
||||||
|
type: string
|
||||||
|
id:
|
||||||
|
description: Resource ID
|
||||||
|
title: Id
|
||||||
|
type: string
|
||||||
|
model:
|
||||||
|
additionalProperties: true
|
||||||
|
description: Resource model
|
||||||
|
title: Model
|
||||||
|
type: object
|
||||||
|
name:
|
||||||
|
description: Resource name
|
||||||
|
title: Name
|
||||||
|
type: string
|
||||||
|
parent:
|
||||||
|
anyOf:
|
||||||
|
- $ref: '#/$defs/ResourceDict'
|
||||||
|
- type: 'null'
|
||||||
|
default: null
|
||||||
|
description: Parent resource object
|
||||||
|
parent_uuid:
|
||||||
|
anyOf:
|
||||||
|
- type: string
|
||||||
|
- type: 'null'
|
||||||
|
default: null
|
||||||
|
description: Parent resource uuid
|
||||||
|
title: Parent Uuid
|
||||||
|
pose:
|
||||||
|
$ref: '#/$defs/ResourceDictPosition'
|
||||||
|
description: Resource position
|
||||||
|
schema:
|
||||||
|
additionalProperties: true
|
||||||
|
description: Resource schema
|
||||||
|
title: Schema
|
||||||
|
type: object
|
||||||
|
type:
|
||||||
|
anyOf:
|
||||||
|
- const: device
|
||||||
|
type: string
|
||||||
|
- type: string
|
||||||
|
description: Resource type
|
||||||
|
title: Type
|
||||||
|
uuid:
|
||||||
|
description: Resource UUID
|
||||||
|
title: Uuid
|
||||||
|
type: string
|
||||||
|
required:
|
||||||
|
- id
|
||||||
|
- uuid
|
||||||
|
- name
|
||||||
|
- type
|
||||||
|
- class
|
||||||
|
- config
|
||||||
|
- data
|
||||||
|
- extra
|
||||||
|
title: ResourceDict
|
||||||
|
type: object
|
||||||
|
ResourceDictPosition:
|
||||||
|
properties:
|
||||||
|
cross_section_type:
|
||||||
|
default: rectangle
|
||||||
|
description: Cross section type
|
||||||
|
enum:
|
||||||
|
- rectangle
|
||||||
|
- circle
|
||||||
|
- rounded_rectangle
|
||||||
|
title: Cross Section Type
|
||||||
|
type: string
|
||||||
|
layout:
|
||||||
|
default: x-y
|
||||||
|
description: Resource layout
|
||||||
|
enum:
|
||||||
|
- 2d
|
||||||
|
- x-y
|
||||||
|
- z-y
|
||||||
|
- x-z
|
||||||
|
title: Layout
|
||||||
|
type: string
|
||||||
|
position:
|
||||||
|
$ref: '#/$defs/ResourceDictPositionObject'
|
||||||
|
description: Resource position
|
||||||
|
position3d:
|
||||||
|
$ref: '#/$defs/ResourceDictPositionObject'
|
||||||
|
description: Resource position in 3D space
|
||||||
|
rotation:
|
||||||
|
$ref: '#/$defs/ResourceDictPositionObject'
|
||||||
|
description: Resource rotation
|
||||||
|
scale:
|
||||||
|
$ref: '#/$defs/ResourceDictPositionScale'
|
||||||
|
description: Resource scale
|
||||||
|
size:
|
||||||
|
$ref: '#/$defs/ResourceDictPositionSize'
|
||||||
|
description: Resource size
|
||||||
|
title: ResourceDictPosition
|
||||||
|
type: object
|
||||||
|
ResourceDictPositionObject:
|
||||||
|
properties:
|
||||||
|
x:
|
||||||
|
default: 0.0
|
||||||
|
description: X coordinate
|
||||||
|
title: X
|
||||||
|
type: number
|
||||||
|
y:
|
||||||
|
default: 0.0
|
||||||
|
description: Y coordinate
|
||||||
|
title: Y
|
||||||
|
type: number
|
||||||
|
z:
|
||||||
|
default: 0.0
|
||||||
|
description: Z coordinate
|
||||||
|
title: Z
|
||||||
|
type: number
|
||||||
|
title: ResourceDictPositionObject
|
||||||
|
type: object
|
||||||
|
ResourceDictPositionScale:
|
||||||
|
properties:
|
||||||
|
x:
|
||||||
|
default: 0.0
|
||||||
|
description: x scale
|
||||||
|
title: X
|
||||||
|
type: number
|
||||||
|
y:
|
||||||
|
default: 0.0
|
||||||
|
description: y scale
|
||||||
|
title: Y
|
||||||
|
type: number
|
||||||
|
z:
|
||||||
|
default: 0.0
|
||||||
|
description: z scale
|
||||||
|
title: Z
|
||||||
|
type: number
|
||||||
|
title: ResourceDictPositionScale
|
||||||
|
type: object
|
||||||
|
ResourceDictPositionSize:
|
||||||
|
properties:
|
||||||
|
depth:
|
||||||
|
default: 0.0
|
||||||
|
description: Depth
|
||||||
|
title: Depth
|
||||||
|
type: number
|
||||||
|
height:
|
||||||
|
default: 0.0
|
||||||
|
description: Height
|
||||||
|
title: Height
|
||||||
|
type: number
|
||||||
|
width:
|
||||||
|
default: 0.0
|
||||||
|
description: Width
|
||||||
|
title: Width
|
||||||
|
type: number
|
||||||
|
title: ResourceDictPositionSize
|
||||||
|
type: object
|
||||||
|
properties:
|
||||||
|
plate:
|
||||||
|
items:
|
||||||
|
items:
|
||||||
|
$ref: '#/$defs/ResourceDict'
|
||||||
|
type: array
|
||||||
|
title: Plate
|
||||||
|
type: array
|
||||||
|
volumes:
|
||||||
|
items:
|
||||||
|
type: number
|
||||||
|
title: Volumes
|
||||||
|
type: array
|
||||||
|
wells:
|
||||||
|
items:
|
||||||
|
items:
|
||||||
|
$ref: '#/$defs/ResourceDict'
|
||||||
|
type: array
|
||||||
|
title: Wells
|
||||||
|
type: array
|
||||||
|
required:
|
||||||
|
- plate
|
||||||
|
- wells
|
||||||
|
- volumes
|
||||||
|
title: SetLiquidFromPlateReturn
|
||||||
|
type: object
|
||||||
|
required:
|
||||||
|
- goal
|
||||||
|
title: set_liquid_from_plate参数
|
||||||
|
type: object
|
||||||
|
type: UniLabJsonCommand
|
||||||
set_tiprack:
|
set_tiprack:
|
||||||
feedback: {}
|
feedback: {}
|
||||||
goal:
|
goal:
|
||||||
@@ -9677,8 +10199,7 @@ liquid_handler.prcxi:
|
|||||||
mix_liquid_height: 0.0
|
mix_liquid_height: 0.0
|
||||||
mix_rate: 0
|
mix_rate: 0
|
||||||
mix_stage: ''
|
mix_stage: ''
|
||||||
mix_times:
|
mix_times: 0
|
||||||
- 0
|
|
||||||
mix_vol: 0
|
mix_vol: 0
|
||||||
none_keys:
|
none_keys:
|
||||||
- ''
|
- ''
|
||||||
@@ -9752,32 +10273,32 @@ liquid_handler.prcxi:
|
|||||||
- 0
|
- 0
|
||||||
handles:
|
handles:
|
||||||
input:
|
input:
|
||||||
- data_key: liquid
|
- data_key: sources
|
||||||
data_source: handle
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: sources
|
handler_key: sources
|
||||||
label: sources
|
label: sources
|
||||||
- data_key: liquid
|
- data_key: targets
|
||||||
data_source: executor
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: targets
|
handler_key: targets
|
||||||
label: targets
|
label: targets
|
||||||
- data_key: liquid
|
- data_key: tip_racks
|
||||||
data_source: executor
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: tip_rack
|
handler_key: tip_racks
|
||||||
label: tip_rack
|
label: tip_racks
|
||||||
output:
|
output:
|
||||||
- data_key: liquid
|
- data_key: sources
|
||||||
data_source: handle
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: sources_out
|
handler_key: sources_out
|
||||||
label: sources
|
label: sources
|
||||||
- data_key: liquid
|
- data_key: targets
|
||||||
data_source: executor
|
data_source: handle
|
||||||
data_type: resource
|
data_type: resource
|
||||||
handler_key: targets_out
|
handler_key: targets_out
|
||||||
label: targets
|
label: 移液后目标孔
|
||||||
placeholder_keys:
|
placeholder_keys:
|
||||||
sources: unilabos_resources
|
sources: unilabos_resources
|
||||||
targets: unilabos_resources
|
targets: unilabos_resources
|
||||||
@@ -9834,11 +10355,9 @@ liquid_handler.prcxi:
|
|||||||
mix_stage:
|
mix_stage:
|
||||||
type: string
|
type: string
|
||||||
mix_times:
|
mix_times:
|
||||||
items:
|
maximum: 2147483647
|
||||||
maximum: 2147483647
|
minimum: -2147483648
|
||||||
minimum: -2147483648
|
type: integer
|
||||||
type: integer
|
|
||||||
type: array
|
|
||||||
mix_vol:
|
mix_vol:
|
||||||
maximum: 2147483647
|
maximum: 2147483647
|
||||||
minimum: -2147483648
|
minimum: -2147483648
|
||||||
@@ -10160,6 +10679,12 @@ liquid_handler.prcxi:
 type: string
 deck:
 type: object
+deck_y:
+default: 400
+type: string
+deck_z:
+default: 300
+type: string
 host:
 type: string
 is_9320:
@@ -10170,17 +10695,44 @@ liquid_handler.prcxi:
|
|||||||
type: string
|
type: string
|
||||||
port:
|
port:
|
||||||
type: integer
|
type: integer
|
||||||
|
rail_interval:
|
||||||
|
default: 0
|
||||||
|
type: string
|
||||||
|
rail_nums:
|
||||||
|
default: 4
|
||||||
|
type: string
|
||||||
|
rail_width:
|
||||||
|
default: 27.5
|
||||||
|
type: string
|
||||||
setup:
|
setup:
|
||||||
default: true
|
default: true
|
||||||
type: string
|
type: string
|
||||||
simulator:
|
simulator:
|
||||||
default: false
|
default: false
|
||||||
type: string
|
type: string
|
||||||
|
start_rail:
|
||||||
|
default: 2
|
||||||
|
type: string
|
||||||
step_mode:
|
step_mode:
|
||||||
default: false
|
default: false
|
||||||
type: string
|
type: string
|
||||||
timeout:
|
timeout:
|
||||||
type: number
|
type: number
|
||||||
|
x_increase:
|
||||||
|
default: -0.003636
|
||||||
|
type: string
|
||||||
|
x_offset:
|
||||||
|
default: -0.8
|
||||||
|
type: string
|
||||||
|
xy_coupling:
|
||||||
|
default: -0.0045
|
||||||
|
type: string
|
||||||
|
y_increase:
|
||||||
|
default: -0.003636
|
||||||
|
type: string
|
||||||
|
y_offset:
|
||||||
|
default: -37.98
|
||||||
|
type: string
|
||||||
required:
|
required:
|
||||||
- deck
|
- deck
|
||||||
- host
|
- host
|
||||||
|
|||||||
@@ -5792,3 +5792,481 @@ virtual_vacuum_pump:
|
|||||||
- status
|
- status
|
||||||
type: object
|
type: object
|
||||||
version: 1.0.0
|
version: 1.0.0
|
||||||
|
virtual_workbench:
|
||||||
|
category:
|
||||||
|
- virtual_device
|
||||||
|
class:
|
||||||
|
action_value_mappings:
|
||||||
|
auto-move_to_heating_station:
|
||||||
|
feedback: {}
|
||||||
|
goal: {}
|
||||||
|
goal_default:
|
||||||
|
material_number: null
|
||||||
|
handles:
|
||||||
|
input:
|
||||||
|
- data_key: material_number
|
||||||
|
data_source: handle
|
||||||
|
data_type: workbench_material
|
||||||
|
handler_key: material_input
|
||||||
|
label: 物料编号
|
||||||
|
output:
|
||||||
|
- data_key: station_id
|
||||||
|
data_source: executor
|
||||||
|
data_type: workbench_station
|
||||||
|
handler_key: heating_station_output
|
||||||
|
label: 加热台ID
|
||||||
|
- data_key: material_number
|
||||||
|
data_source: executor
|
||||||
|
data_type: workbench_material
|
||||||
|
handler_key: material_number_output
|
||||||
|
label: 物料编号
|
||||||
|
placeholder_keys: {}
|
||||||
|
result: {}
|
||||||
|
schema:
|
||||||
|
description: 将物料从An位置移动到空闲加热台,返回分配的加热台ID
|
||||||
|
properties:
|
||||||
|
feedback: {}
|
||||||
|
goal:
|
||||||
|
properties:
|
||||||
|
material_number:
|
||||||
|
description: 物料编号,1-5,物料ID自动生成为A{n}
|
||||||
|
type: integer
|
||||||
|
required:
|
||||||
|
- material_number
|
||||||
|
type: object
|
||||||
|
result:
|
||||||
|
$defs:
|
||||||
|
LabSample:
|
||||||
|
properties:
|
||||||
|
extra:
|
||||||
|
additionalProperties: true
|
||||||
|
title: Extra
|
||||||
|
type: object
|
||||||
|
oss_path:
|
||||||
|
title: Oss Path
|
||||||
|
type: string
|
||||||
|
sample_uuid:
|
||||||
|
title: Sample Uuid
|
||||||
|
type: string
|
||||||
|
required:
|
||||||
|
- sample_uuid
|
||||||
|
- oss_path
|
||||||
|
- extra
|
||||||
|
title: LabSample
|
||||||
|
type: object
|
||||||
|
description: move_to_heating_station 返回类型
|
||||||
|
properties:
|
||||||
|
material_id:
|
||||||
|
title: Material Id
|
||||||
|
type: string
|
||||||
|
material_number:
|
||||||
|
title: Material Number
|
||||||
|
type: integer
|
||||||
|
message:
|
||||||
|
title: Message
|
||||||
|
type: string
|
||||||
|
station_id:
|
||||||
|
description: 分配的加热台ID
|
||||||
|
title: Station Id
|
||||||
|
type: integer
|
||||||
|
success:
|
||||||
|
title: Success
|
||||||
|
type: boolean
|
||||||
|
unilabos_samples:
|
||||||
|
items:
|
||||||
|
$ref: '#/$defs/LabSample'
|
||||||
|
title: Unilabos Samples
|
||||||
|
type: array
|
||||||
|
required:
|
||||||
|
- success
|
||||||
|
- station_id
|
||||||
|
- material_id
|
||||||
|
- material_number
|
||||||
|
- message
|
||||||
|
- unilabos_samples
|
||||||
|
title: MoveToHeatingStationResult
|
||||||
|
type: object
|
||||||
|
required:
|
||||||
|
- goal
|
||||||
|
title: move_to_heating_station参数
|
||||||
|
type: object
|
||||||
|
type: UniLabJsonCommand
|
||||||
|
auto-move_to_output:
|
||||||
|
feedback: {}
|
||||||
|
goal: {}
|
||||||
|
goal_default:
|
||||||
|
material_number: null
|
||||||
|
station_id: null
|
||||||
|
handles:
|
||||||
|
input:
|
||||||
|
- data_key: station_id
|
||||||
|
data_source: handle
|
||||||
|
data_type: workbench_station
|
||||||
|
handler_key: output_station_input
|
||||||
|
label: 加热台ID
|
||||||
|
- data_key: material_number
|
||||||
|
data_source: handle
|
||||||
|
data_type: workbench_material
|
||||||
|
handler_key: output_material_input
|
||||||
|
label: 物料编号
|
||||||
|
placeholder_keys: {}
|
||||||
|
result: {}
|
||||||
|
schema:
|
||||||
|
description: 将物料从加热台移动到输出位置Cn
|
||||||
|
properties:
|
||||||
|
feedback: {}
|
||||||
|
goal:
|
||||||
|
properties:
|
||||||
|
material_number:
|
||||||
|
description: 物料编号,用于确定输出位置Cn
|
||||||
|
type: integer
|
||||||
|
station_id:
|
||||||
|
description: 加热台ID,1-3,从上一节点传入
|
||||||
|
type: integer
|
||||||
|
required:
|
||||||
|
- station_id
|
||||||
|
- material_number
|
||||||
|
type: object
|
||||||
|
result:
|
||||||
|
$defs:
|
||||||
|
LabSample:
|
||||||
|
properties:
|
||||||
|
extra:
|
||||||
|
additionalProperties: true
|
||||||
|
title: Extra
|
||||||
|
type: object
|
||||||
|
oss_path:
|
||||||
|
title: Oss Path
|
||||||
|
type: string
|
||||||
|
sample_uuid:
|
||||||
|
title: Sample Uuid
|
||||||
|
type: string
|
||||||
|
required:
|
||||||
|
- sample_uuid
|
||||||
|
- oss_path
|
||||||
|
- extra
|
||||||
|
title: LabSample
|
||||||
|
type: object
|
||||||
|
description: move_to_output 返回类型
|
||||||
|
properties:
|
||||||
|
material_id:
|
||||||
|
title: Material Id
|
||||||
|
type: string
|
||||||
|
station_id:
|
||||||
|
title: Station Id
|
||||||
|
type: integer
|
||||||
|
success:
|
||||||
|
title: Success
|
||||||
|
type: boolean
|
||||||
|
unilabos_samples:
|
||||||
|
items:
|
||||||
|
$ref: '#/$defs/LabSample'
|
||||||
|
title: Unilabos Samples
|
||||||
|
type: array
|
||||||
|
required:
|
||||||
|
- success
|
||||||
|
- station_id
|
||||||
|
- material_id
|
||||||
|
- unilabos_samples
|
||||||
|
title: MoveToOutputResult
|
||||||
|
type: object
|
||||||
|
required:
|
||||||
|
- goal
|
||||||
|
title: move_to_output参数
|
||||||
|
type: object
|
||||||
|
type: UniLabJsonCommand
|
||||||
|
auto-prepare_materials:
|
||||||
|
feedback: {}
|
||||||
|
goal: {}
|
||||||
|
goal_default:
|
||||||
|
count: 5
|
||||||
|
handles:
|
||||||
|
output:
|
||||||
|
- data_key: material_1
|
||||||
|
data_source: executor
|
||||||
|
data_type: workbench_material
|
||||||
|
handler_key: channel_1
|
||||||
|
label: 实验1
|
||||||
|
- data_key: material_2
|
||||||
|
data_source: executor
|
||||||
|
data_type: workbench_material
|
||||||
|
handler_key: channel_2
|
||||||
|
label: 实验2
|
||||||
|
- data_key: material_3
|
||||||
|
data_source: executor
|
||||||
|
data_type: workbench_material
|
||||||
|
handler_key: channel_3
|
||||||
|
label: 实验3
|
||||||
|
- data_key: material_4
|
||||||
|
data_source: executor
|
||||||
|
data_type: workbench_material
|
||||||
|
handler_key: channel_4
|
||||||
|
label: 实验4
|
||||||
|
- data_key: material_5
|
||||||
|
data_source: executor
|
||||||
|
data_type: workbench_material
|
||||||
|
handler_key: channel_5
|
||||||
|
label: 实验5
|
||||||
|
placeholder_keys: {}
|
||||||
|
result: {}
|
||||||
|
schema:
|
||||||
|
description: 批量准备物料 - 虚拟起始节点,生成A1-A5物料,输出5个handle供后续节点使用
|
||||||
|
properties:
|
||||||
|
feedback: {}
|
||||||
|
goal:
|
||||||
|
properties:
|
||||||
|
count:
|
||||||
|
default: 5
|
||||||
|
description: 待生成的物料数量,默认5 (生成 A1-A5)
|
||||||
|
type: integer
|
||||||
|
required: []
|
||||||
|
type: object
|
||||||
|
result:
|
||||||
|
$defs:
|
||||||
|
LabSample:
|
||||||
|
properties:
|
||||||
|
extra:
|
||||||
|
additionalProperties: true
|
||||||
|
title: Extra
|
||||||
|
type: object
|
||||||
|
oss_path:
|
||||||
|
title: Oss Path
|
||||||
|
type: string
|
||||||
|
sample_uuid:
|
||||||
|
title: Sample Uuid
|
||||||
|
type: string
|
||||||
|
required:
|
||||||
|
- sample_uuid
|
||||||
|
- oss_path
|
||||||
|
- extra
|
||||||
|
title: LabSample
|
||||||
|
type: object
|
||||||
|
description: prepare_materials 返回类型 - 批量准备物料
|
||||||
|
properties:
|
||||||
|
count:
|
||||||
|
title: Count
|
||||||
|
type: integer
|
||||||
|
material_1:
|
||||||
|
title: Material 1
|
||||||
|
type: integer
|
||||||
|
material_2:
|
||||||
|
title: Material 2
|
||||||
|
type: integer
|
||||||
|
material_3:
|
||||||
|
title: Material 3
|
||||||
|
type: integer
|
||||||
|
material_4:
|
||||||
|
title: Material 4
|
||||||
|
type: integer
|
||||||
|
material_5:
|
||||||
|
title: Material 5
|
||||||
|
type: integer
|
||||||
|
message:
|
||||||
|
title: Message
|
||||||
|
type: string
|
||||||
|
success:
|
||||||
|
title: Success
|
||||||
|
type: boolean
|
||||||
|
unilabos_samples:
|
||||||
|
items:
|
||||||
|
$ref: '#/$defs/LabSample'
|
||||||
|
title: Unilabos Samples
|
||||||
|
type: array
|
||||||
|
required:
|
||||||
|
- success
|
||||||
|
- count
|
||||||
|
- material_1
|
||||||
|
- material_2
|
||||||
|
- material_3
|
||||||
|
- material_4
|
||||||
|
- material_5
|
||||||
|
- message
|
||||||
|
- unilabos_samples
|
||||||
|
title: PrepareMaterialsResult
|
||||||
|
type: object
|
||||||
|
required:
|
||||||
|
- goal
|
||||||
|
title: prepare_materials参数
|
||||||
|
type: object
|
||||||
|
type: UniLabJsonCommand
|
||||||
|
auto-start_heating:
|
||||||
|
feedback: {}
|
||||||
|
goal: {}
|
||||||
|
goal_default:
|
||||||
|
material_number: null
|
||||||
|
station_id: null
|
||||||
|
handles:
|
||||||
|
input:
|
||||||
|
- data_key: station_id
|
||||||
|
data_source: handle
|
||||||
|
data_type: workbench_station
|
||||||
|
handler_key: station_id_input
|
||||||
|
label: 加热台ID
|
||||||
|
- data_key: material_number
|
||||||
|
data_source: handle
|
||||||
|
            data_type: workbench_material
            handler_key: material_number_input
            label: 物料编号
          output:
          - data_key: station_id
            data_source: executor
            data_type: workbench_station
            handler_key: heating_done_station
            label: 加热完成-加热台ID
          - data_key: material_number
            data_source: executor
            data_type: workbench_material
            handler_key: heating_done_material
            label: 加热完成-物料编号
        placeholder_keys: {}
        result: {}
        schema:
          description: 启动指定加热台的加热程序
          properties:
            feedback: {}
            goal:
              properties:
                material_number:
                  description: 物料编号,从上一节点传入
                  type: integer
                station_id:
                  description: 加热台ID,1-3,从上一节点传入
                  type: integer
              required:
              - station_id
              - material_number
              type: object
            result:
              $defs:
                LabSample:
                  properties:
                    extra:
                      additionalProperties: true
                      title: Extra
                      type: object
                    oss_path:
                      title: Oss Path
                      type: string
                    sample_uuid:
                      title: Sample Uuid
                      type: string
                  required:
                  - sample_uuid
                  - oss_path
                  - extra
                  title: LabSample
                  type: object
              description: start_heating 返回类型
              properties:
                material_id:
                  title: Material Id
                  type: string
                material_number:
                  title: Material Number
                  type: integer
                message:
                  title: Message
                  type: string
                station_id:
                  title: Station Id
                  type: integer
                success:
                  title: Success
                  type: boolean
                unilabos_samples:
                  items:
                    $ref: '#/$defs/LabSample'
                  title: Unilabos Samples
                  type: array
              required:
              - success
              - station_id
              - material_id
              - material_number
              - message
              - unilabos_samples
              title: StartHeatingResult
              type: object
          required:
          - goal
          title: start_heating参数
          type: object
        type: UniLabJsonCommand
    module: unilabos.devices.virtual.workbench:VirtualWorkbench
    status_types:
      active_tasks_count: int
      arm_current_task: str
      arm_state: str
      heating_station_1_material: str
      heating_station_1_progress: float
      heating_station_1_state: str
      heating_station_2_material: str
      heating_station_2_progress: float
      heating_station_2_state: str
      heating_station_3_material: str
      heating_station_3_progress: float
      heating_station_3_state: str
      message: str
      status: str
    type: python
  config_info: []
  description: Virtual Workbench with 1 robotic arm and 3 heating stations for concurrent
    material processing
  handles: []
  icon: ''
  init_param_schema:
    config:
      properties:
        config:
          type: string
        device_id:
          type: string
      required: []
      type: object
    data:
      properties:
        active_tasks_count:
          type: integer
        arm_current_task:
          type: string
        arm_state:
          type: string
        heating_station_1_material:
          type: string
        heating_station_1_progress:
          type: number
        heating_station_1_state:
          type: string
        heating_station_2_material:
          type: string
        heating_station_2_progress:
          type: number
        heating_station_2_state:
          type: string
        heating_station_3_material:
          type: string
        heating_station_3_progress:
          type: number
        heating_station_3_state:
          type: string
        message:
          type: string
        status:
          type: string
      required:
      - status
      - arm_state
      - arm_current_task
      - heating_station_1_state
      - heating_station_1_material
      - heating_station_1_progress
      - heating_station_2_state
      - heating_station_2_material
      - heating_station_2_progress
      - heating_station_3_state
      - heating_station_3_material
      - heating_station_3_progress
      - active_tasks_count
      - message
      type: object
  version: 1.0.0
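The registry entry above declares the `start_heating` action: a goal contract (station_id, material_number), defaults, and a result schema. As a minimal sketch of how such an entry can be consumed, the helper below merges `goal_default` with user input and checks the goal's `required` keys. This is illustrative only and not part of Uni-Lab-OS; the dictionary layout simply mirrors the YAML above.

# Illustrative only: a minimal goal check against a registry action entry.
from typing import Any, Dict

def build_goal(action_entry: Dict[str, Any], user_goal: Dict[str, Any]) -> Dict[str, Any]:
    """Merge user input over goal_default and verify required goal keys."""
    goal_schema = action_entry.get("schema", {}).get("properties", {}).get("goal", {})
    goal = dict(action_entry.get("goal_default", {}))
    goal.update(user_goal)
    missing = [k for k in goal_schema.get("required", []) if k not in goal]
    if missing:
        raise ValueError(f"missing required goal fields: {missing}")
    return goal

# Example: the start_heating goal requires station_id and material_number.
entry = {
    "goal_default": {"station_id": 1, "material_number": 0},
    "schema": {"properties": {"goal": {"required": ["station_id", "material_number"]}}},
}
print(build_goal(entry, {"station_id": 2, "material_number": 7}))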
|||||||
@@ -4,6 +4,8 @@ import os
 import sys
 import inspect
 import importlib
+import threading
+from concurrent.futures import ThreadPoolExecutor, as_completed
 from pathlib import Path
 from typing import Any, Dict, List, Union, Tuple
 
@@ -60,6 +62,7 @@ class Registry:
         self.device_module_to_registry = {}
         self.resource_type_registry = {}
         self._setup_called = False  # 跟踪setup是否已调用
+        self._registry_lock = threading.Lock()  # 多线程加载时的锁
         # 其他状态变量
         # self.is_host_mode = False # 移至BasicConfig中
 
@@ -71,6 +74,20 @@ class Registry:
 
         from unilabos.app.web.utils.action_utils import get_yaml_from_goal_type
 
+        # 获取 HostNode 类的增强信息,用于自动生成 action schema
+        host_node_enhanced_info = get_enhanced_class_info(
+            "unilabos.ros.nodes.presets.host_node:HostNode", use_dynamic=True
+        )
+
+        # 为 test_latency 生成 schema,保留原有 description
+        test_latency_method_info = host_node_enhanced_info.get("action_methods", {}).get("test_latency", {})
+        test_latency_schema = self._generate_unilab_json_command_schema(
+            test_latency_method_info.get("args", []),
+            "test_latency",
+            test_latency_method_info.get("return_annotation"),
+        )
+        test_latency_schema["description"] = "用于测试延迟的动作,返回延迟时间和时间差。"
+
         self.device_type_registry.update(
             {
                 "host_node": {
@@ -153,14 +170,18 @@ class Registry:
                     },
                 },
                 "test_latency": {
-                    "type": self.EmptyIn,
+                    "type": (
+                        "UniLabJsonCommandAsync"
+                        if test_latency_method_info.get("is_async", False)
+                        else "UniLabJsonCommand"
+                    ),
                     "goal": {},
                     "feedback": {},
                     "result": {},
-                    "schema": ros_action_to_json_schema(
-                        self.EmptyIn, "用于测试延迟的动作,返回延迟时间和时间差。"
-                    ),
-                    "goal_default": {},
+                    "schema": test_latency_schema,
+                    "goal_default": {
+                        arg["name"]: arg["default"] for arg in test_latency_method_info.get("args", [])
+                    },
                     "handles": {},
                 },
                 "auto-test_resource": {
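The hunks above switch `test_latency` from a ROS `EmptyIn` action to a `UniLabJsonCommand` whose schema is derived from the method's collected argument info. The sketch below shows the general idea of signature-driven schema generation with `inspect`; it assumes plain int/float/str/bool annotations and is not the actual `_generate_unilab_json_command_schema`, which works from the registry's enhanced class info and covers more cases.

# A simplified, hypothetical sketch of signature-driven goal schema generation.
import inspect
from typing import Any, Dict

_TYPE_MAP = {int: "integer", float: "number", str: "string", bool: "boolean"}

def goal_schema_from_callable(func) -> Dict[str, Any]:
    props: Dict[str, Any] = {}
    required = []
    for name, param in inspect.signature(func).parameters.items():
        if name == "self":
            continue
        props[name] = {"type": _TYPE_MAP.get(param.annotation, "string")}
        if param.default is inspect.Parameter.empty:
            required.append(name)
    return {"type": "object", "properties": props, "required": required}

def test_latency(round_trip: int = 1, note: str = "") -> dict:
    return {"round_trip": round_trip, "note": note}

print(goal_schema_from_callable(test_latency))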
@@ -243,67 +264,115 @@ class Registry:
|
|||||||
# 标记setup已被调用
|
# 标记setup已被调用
|
||||||
self._setup_called = True
|
self._setup_called = True
|
||||||
|
|
||||||
|
def _load_single_resource_file(
|
||||||
|
self, file: Path, complete_registry: bool, upload_registry: bool
|
||||||
|
) -> Tuple[Dict[str, Any], Dict[str, Any], bool]:
|
||||||
|
"""
|
||||||
|
加载单个资源文件 (线程安全)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
(data, complete_data, is_valid): 资源数据, 完整数据, 是否有效
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
with open(file, encoding="utf-8", mode="r") as f:
|
||||||
|
data = yaml.safe_load(io.StringIO(f.read()))
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"[UniLab Registry] 读取资源文件失败: {file}, 错误: {e}")
|
||||||
|
return {}, {}, False
|
||||||
|
|
||||||
|
if not data:
|
||||||
|
return {}, {}, False
|
||||||
|
|
||||||
|
complete_data = {}
|
||||||
|
for resource_id, resource_info in data.items():
|
||||||
|
if "version" not in resource_info:
|
||||||
|
resource_info["version"] = "1.0.0"
|
||||||
|
if "category" not in resource_info:
|
||||||
|
resource_info["category"] = [file.stem]
|
||||||
|
elif file.stem not in resource_info["category"]:
|
||||||
|
resource_info["category"].append(file.stem)
|
||||||
|
elif not isinstance(resource_info.get("category"), list):
|
||||||
|
resource_info["category"] = [resource_info["category"]]
|
||||||
|
if "config_info" not in resource_info:
|
||||||
|
resource_info["config_info"] = []
|
||||||
|
if "icon" not in resource_info:
|
||||||
|
resource_info["icon"] = ""
|
||||||
|
if "handles" not in resource_info:
|
||||||
|
resource_info["handles"] = []
|
||||||
|
if "init_param_schema" not in resource_info:
|
||||||
|
resource_info["init_param_schema"] = {}
|
||||||
|
if "config_info" in resource_info:
|
||||||
|
del resource_info["config_info"]
|
||||||
|
if "file_path" in resource_info:
|
||||||
|
del resource_info["file_path"]
|
||||||
|
complete_data[resource_id] = copy.deepcopy(dict(sorted(resource_info.items())))
|
||||||
|
if upload_registry:
|
||||||
|
class_info = resource_info.get("class", {})
|
||||||
|
if len(class_info) and "module" in class_info:
|
||||||
|
if class_info.get("type") == "pylabrobot":
|
||||||
|
res_class = get_class(class_info["module"])
|
||||||
|
if callable(res_class) and not isinstance(res_class, type):
|
||||||
|
res_instance = res_class(res_class.__name__)
|
||||||
|
res_ulr = tree_to_list([resource_plr_to_ulab(res_instance)])
|
||||||
|
resource_info["config_info"] = res_ulr
|
||||||
|
resource_info["registry_type"] = "resource"
|
||||||
|
resource_info["file_path"] = str(file.absolute()).replace("\\", "/")
|
||||||
|
|
||||||
|
complete_data = dict(sorted(complete_data.items()))
|
||||||
|
complete_data = copy.deepcopy(complete_data)
|
||||||
|
|
||||||
|
if complete_registry:
|
||||||
|
try:
|
||||||
|
with open(file, "w", encoding="utf-8") as f:
|
||||||
|
yaml.dump(complete_data, f, allow_unicode=True, default_flow_style=False, Dumper=NoAliasDumper)
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"[UniLab Registry] 写入资源文件失败: {file}, 错误: {e}")
|
||||||
|
|
||||||
|
return data, complete_data, True
|
||||||
|
|
||||||
def load_resource_types(self, path: os.PathLike, complete_registry: bool, upload_registry: bool):
|
def load_resource_types(self, path: os.PathLike, complete_registry: bool, upload_registry: bool):
|
||||||
abs_path = Path(path).absolute()
|
abs_path = Path(path).absolute()
|
||||||
resource_path = abs_path / "resources"
|
resource_path = abs_path / "resources"
|
||||||
files = list(resource_path.glob("*/*.yaml"))
|
files = list(resource_path.glob("*/*.yaml"))
|
||||||
logger.trace(f"[UniLab Registry] load resources? {resource_path.exists()}, total: {len(files)}")
|
logger.debug(f"[UniLab Registry] resources: {resource_path.exists()}, total: {len(files)}")
|
||||||
current_resource_number = len(self.resource_type_registry) + 1
|
|
||||||
for i, file in enumerate(files):
|
|
||||||
with open(file, encoding="utf-8", mode="r") as f:
|
|
||||||
data = yaml.safe_load(io.StringIO(f.read()))
|
|
||||||
complete_data = {}
|
|
||||||
if data:
|
|
||||||
# 为每个资源添加文件路径信息
|
|
||||||
for resource_id, resource_info in data.items():
|
|
||||||
if "version" not in resource_info:
|
|
||||||
resource_info["version"] = "1.0.0"
|
|
||||||
if "category" not in resource_info:
|
|
||||||
resource_info["category"] = [file.stem]
|
|
||||||
elif file.stem not in resource_info["category"]:
|
|
||||||
resource_info["category"].append(file.stem)
|
|
||||||
elif not isinstance(resource_info.get("category"), list):
|
|
||||||
resource_info["category"] = [resource_info["category"]]
|
|
||||||
if "config_info" not in resource_info:
|
|
||||||
resource_info["config_info"] = []
|
|
||||||
if "icon" not in resource_info:
|
|
||||||
resource_info["icon"] = ""
|
|
||||||
if "handles" not in resource_info:
|
|
||||||
resource_info["handles"] = []
|
|
||||||
if "init_param_schema" not in resource_info:
|
|
||||||
resource_info["init_param_schema"] = {}
|
|
||||||
if "config_info" in resource_info:
|
|
||||||
del resource_info["config_info"]
|
|
||||||
if "file_path" in resource_info:
|
|
||||||
del resource_info["file_path"]
|
|
||||||
complete_data[resource_id] = copy.deepcopy(dict(sorted(resource_info.items())))
|
|
||||||
if upload_registry:
|
|
||||||
class_info = resource_info.get("class", {})
|
|
||||||
if len(class_info) and "module" in class_info:
|
|
||||||
if class_info.get("type") == "pylabrobot":
|
|
||||||
res_class = get_class(class_info["module"])
|
|
||||||
if callable(res_class) and not isinstance(
|
|
||||||
res_class, type
|
|
||||||
): # 有的是类,有的是函数,这里暂时只登记函数类的
|
|
||||||
res_instance = res_class(res_class.__name__)
|
|
||||||
res_ulr = tree_to_list([resource_plr_to_ulab(res_instance)])
|
|
||||||
resource_info["config_info"] = res_ulr
|
|
||||||
resource_info["registry_type"] = "resource"
|
|
||||||
resource_info["file_path"] = str(file.absolute()).replace("\\", "/")
|
|
||||||
complete_data = dict(sorted(complete_data.items()))
|
|
||||||
complete_data = copy.deepcopy(complete_data)
|
|
||||||
if complete_registry:
|
|
||||||
with open(file, "w", encoding="utf-8") as f:
|
|
||||||
yaml.dump(complete_data, f, allow_unicode=True, default_flow_style=False, Dumper=NoAliasDumper)
|
|
||||||
|
|
||||||
|
if not files:
|
||||||
|
return
|
||||||
|
|
||||||
|
# 使用线程池并行加载
|
||||||
|
max_workers = min(8, len(files))
|
||||||
|
results = []
|
||||||
|
|
||||||
|
with ThreadPoolExecutor(max_workers=max_workers) as executor:
|
||||||
|
future_to_file = {
|
||||||
|
executor.submit(self._load_single_resource_file, file, complete_registry, upload_registry): file
|
||||||
|
for file in files
|
||||||
|
}
|
||||||
|
for future in as_completed(future_to_file):
|
||||||
|
file = future_to_file[future]
|
||||||
|
try:
|
||||||
|
data, complete_data, is_valid = future.result()
|
||||||
|
if is_valid:
|
||||||
|
results.append((file, data))
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"[UniLab Registry] 处理资源文件异常: {file}, 错误: {e}")
|
||||||
|
|
||||||
|
# 线程安全地更新注册表
|
||||||
|
current_resource_number = len(self.resource_type_registry) + 1
|
||||||
|
with self._registry_lock:
|
||||||
|
for i, (file, data) in enumerate(results):
|
||||||
self.resource_type_registry.update(data)
|
self.resource_type_registry.update(data)
|
||||||
logger.trace( # type: ignore
|
logger.trace(
|
||||||
f"[UniLab Registry] Resource-{current_resource_number} File-{i+1}/{len(files)} "
|
f"[UniLab Registry] Resource-{current_resource_number} File-{i+1}/{len(results)} "
|
||||||
+ f"Add {list(data.keys())}"
|
+ f"Add {list(data.keys())}"
|
||||||
)
|
)
|
||||||
current_resource_number += 1
|
current_resource_number += 1
|
||||||
else:
|
|
||||||
logger.debug(f"[UniLab Registry] Res File-{i+1}/{len(files)} Not Valid YAML File: {file.absolute()}")
|
# 记录无效文件
|
||||||
|
valid_files = {r[0] for r in results}
|
||||||
|
for file in files:
|
||||||
|
if file not in valid_files:
|
||||||
|
logger.debug(f"[UniLab Registry] Res File Not Valid YAML File: {file.absolute()}")
|
||||||
|
|
||||||
def _extract_class_docstrings(self, module_string: str) -> Dict[str, str]:
|
def _extract_class_docstrings(self, module_string: str) -> Dict[str, str]:
|
||||||
"""
|
"""
|
||||||
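The resource-loading hunk above moves YAML parsing into `_load_single_resource_file` and runs it through a `ThreadPoolExecutor`, merging results into the registry under the new `_registry_lock`. The snippet below is a reduced sketch of that pattern, assuming a pyyaml dependency and placeholder file paths; it is not the project's implementation.

# Minimal sketch: parse files in a thread pool, merge under a lock.
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path

import yaml

registry: dict = {}
registry_lock = threading.Lock()

def load_one(path: Path) -> dict:
    try:
        data = yaml.safe_load(path.read_text(encoding="utf-8"))
    except Exception:
        return {}
    return data or {}

def load_all(files: list[Path]) -> None:
    if not files:
        return
    with ThreadPoolExecutor(max_workers=min(8, len(files))) as pool:
        futures = {pool.submit(load_one, f): f for f in files}
        results = [fut.result() for fut in as_completed(futures)]
    with registry_lock:  # only the shared-dict merge needs serializing
        for data in results:
            registry.update(data)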
@@ -480,7 +549,11 @@ class Registry:
         return status_schema
 
     def _generate_unilab_json_command_schema(
-        self, method_args: List[Dict[str, Any]], method_name: str, return_annotation: Any = None
+        self,
+        method_args: List[Dict[str, Any]],
+        method_name: str,
+        return_annotation: Any = None,
+        previous_schema: Dict[str, Any] | None = None,
     ) -> Dict[str, Any]:
         """
         根据UniLabJsonCommand方法信息生成JSON Schema,暂不支持嵌套类型
@@ -489,6 +562,7 @@ class Registry:
             method_args: 方法信息字典,包含args等
             method_name: 方法名称
             return_annotation: 返回类型注解,用于生成result schema(仅支持TypedDict)
+            previous_schema: 之前的 schema,用于保留 goal/feedback/result 下一级字段的 description
 
         Returns:
             JSON Schema格式的参数schema
@@ -522,7 +596,7 @@ class Registry:
         if return_annotation is not None and self._is_typed_dict(return_annotation):
             result_schema = self._generate_typed_dict_result_schema(return_annotation)
 
-        return {
+        final_schema = {
             "title": f"{method_name}参数",
             "description": f"",
             "type": "object",
@@ -530,6 +604,40 @@ class Registry:
             "required": ["goal"],
         }
+
+        # 保留之前 schema 中 goal/feedback/result 下一级字段的 description
+        if previous_schema:
+            self._preserve_field_descriptions(final_schema, previous_schema)
+
+        return final_schema
+
+    def _preserve_field_descriptions(self, new_schema: Dict[str, Any], previous_schema: Dict[str, Any]) -> None:
+        """
+        保留之前 schema 中 goal/feedback/result 下一级字段的 description 和 title
+
+        Args:
+            new_schema: 新生成的 schema(会被修改)
+            previous_schema: 之前的 schema
+        """
+        for section in ["goal", "feedback", "result"]:
+            new_section = new_schema.get("properties", {}).get(section, {})
+            prev_section = previous_schema.get("properties", {}).get(section, {})
+
+            if not new_section or not prev_section:
+                continue
+
+            new_props = new_section.get("properties", {})
+            prev_props = prev_section.get("properties", {})
+
+            for field_name, field_schema in new_props.items():
+                if field_name in prev_props:
+                    prev_field = prev_props[field_name]
+                    # 保留字段的 description
+                    if "description" in prev_field and prev_field["description"]:
+                        field_schema["description"] = prev_field["description"]
+                    # 保留字段的 title(用户自定义的中文名)
+                    if "title" in prev_field and prev_field["title"]:
+                        field_schema["title"] = prev_field["title"]
 
     def _is_typed_dict(self, annotation: Any) -> bool:
         """
         检查类型注解是否是TypedDict
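A toy illustration of the carry-over performed by `_preserve_field_descriptions`: when a schema is regenerated from the method signature, hand-written descriptions from the previous registry file are copied back onto the matching goal fields. The input dicts are made up for the example.

# Toy data only: show how regenerated fields keep previous descriptions.
previous = {
    "properties": {
        "goal": {"properties": {"station_id": {"type": "integer", "description": "加热台ID,1-3"}}}
    }
}
regenerated = {
    "properties": {
        "goal": {"properties": {"station_id": {"type": "integer"}}}
    }
}

for section in ("goal", "feedback", "result"):
    new_props = regenerated.get("properties", {}).get(section, {}).get("properties", {})
    prev_props = previous.get("properties", {}).get(section, {}).get("properties", {})
    for name, field in new_props.items():
        if name in prev_props and prev_props[name].get("description"):
            field["description"] = prev_props[name]["description"]

print(regenerated["properties"]["goal"]["properties"]["station_id"])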
@@ -616,209 +724,244 @@ class Registry:
|
|||||||
"handles": {},
|
"handles": {},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
def _load_single_device_file(
|
||||||
|
self, file: Path, complete_registry: bool, get_yaml_from_goal_type
|
||||||
|
) -> Tuple[Dict[str, Any], Dict[str, Any], bool, List[str]]:
|
||||||
|
"""
|
||||||
|
加载单个设备文件 (线程安全)
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
(data, complete_data, is_valid, device_ids): 设备数据, 完整数据, 是否有效, 设备ID列表
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
with open(file, encoding="utf-8", mode="r") as f:
|
||||||
|
data = yaml.safe_load(io.StringIO(f.read()))
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"[UniLab Registry] 读取设备文件失败: {file}, 错误: {e}")
|
||||||
|
return {}, {}, False, []
|
||||||
|
|
||||||
|
if not data:
|
||||||
|
return {}, {}, False, []
|
||||||
|
|
||||||
|
complete_data = {}
|
||||||
|
action_str_type_mapping = {
|
||||||
|
"UniLabJsonCommand": "UniLabJsonCommand",
|
||||||
|
"UniLabJsonCommandAsync": "UniLabJsonCommandAsync",
|
||||||
|
}
|
||||||
|
status_str_type_mapping = {}
|
||||||
|
device_ids = []
|
||||||
|
|
||||||
|
for device_id, device_config in data.items():
|
||||||
|
if "version" not in device_config:
|
||||||
|
device_config["version"] = "1.0.0"
|
||||||
|
if "category" not in device_config:
|
||||||
|
device_config["category"] = [file.stem]
|
||||||
|
elif file.stem not in device_config["category"]:
|
||||||
|
device_config["category"].append(file.stem)
|
||||||
|
if "config_info" not in device_config:
|
||||||
|
device_config["config_info"] = []
|
||||||
|
if "description" not in device_config:
|
||||||
|
device_config["description"] = ""
|
||||||
|
if "icon" not in device_config:
|
||||||
|
device_config["icon"] = ""
|
||||||
|
if "handles" not in device_config:
|
||||||
|
device_config["handles"] = []
|
||||||
|
if "init_param_schema" not in device_config:
|
||||||
|
device_config["init_param_schema"] = {}
|
||||||
|
if "class" in device_config:
|
||||||
|
if "status_types" not in device_config["class"] or device_config["class"]["status_types"] is None:
|
||||||
|
device_config["class"]["status_types"] = {}
|
||||||
|
if (
|
||||||
|
"action_value_mappings" not in device_config["class"]
|
||||||
|
or device_config["class"]["action_value_mappings"] is None
|
||||||
|
):
|
||||||
|
device_config["class"]["action_value_mappings"] = {}
|
||||||
|
enhanced_info = {}
|
||||||
|
if complete_registry:
|
||||||
|
device_config["class"]["status_types"].clear()
|
||||||
|
enhanced_info = get_enhanced_class_info(device_config["class"]["module"], use_dynamic=True)
|
||||||
|
if not enhanced_info.get("dynamic_import_success", False):
|
||||||
|
continue
|
||||||
|
device_config["class"]["status_types"].update(
|
||||||
|
{k: v["return_type"] for k, v in enhanced_info["status_methods"].items()}
|
||||||
|
)
|
||||||
|
for status_name, status_type in device_config["class"]["status_types"].items():
|
||||||
|
if isinstance(status_type, tuple) or status_type in ["Any", "None", "Unknown"]:
|
||||||
|
status_type = "String"
|
||||||
|
device_config["class"]["status_types"][status_name] = status_type
|
||||||
|
try:
|
||||||
|
target_type = self._replace_type_with_class(status_type, device_id, f"状态 {status_name}")
|
||||||
|
except ROSMsgNotFound:
|
||||||
|
continue
|
||||||
|
if target_type in [dict, list]:
|
||||||
|
target_type = String
|
||||||
|
status_str_type_mapping[status_type] = target_type
|
||||||
|
device_config["class"]["status_types"] = dict(sorted(device_config["class"]["status_types"].items()))
|
||||||
|
if complete_registry:
|
||||||
|
old_action_configs = {}
|
||||||
|
for action_name, action_config in device_config["class"]["action_value_mappings"].items():
|
||||||
|
old_action_configs[action_name] = action_config
|
||||||
|
|
||||||
|
device_config["class"]["action_value_mappings"] = {
|
||||||
|
k: v
|
||||||
|
for k, v in device_config["class"]["action_value_mappings"].items()
|
||||||
|
if not k.startswith("auto-")
|
||||||
|
}
|
||||||
|
device_config["class"]["action_value_mappings"].update(
|
||||||
|
{
|
||||||
|
f"auto-{k}": {
|
||||||
|
"type": "UniLabJsonCommandAsync" if v["is_async"] else "UniLabJsonCommand",
|
||||||
|
"goal": {},
|
||||||
|
"feedback": {},
|
||||||
|
"result": {},
|
||||||
|
"schema": self._generate_unilab_json_command_schema(
|
||||||
|
v["args"],
|
||||||
|
k,
|
||||||
|
v.get("return_annotation"),
|
||||||
|
old_action_configs.get(f"auto-{k}", {}).get("schema"),
|
||||||
|
),
|
||||||
|
"goal_default": {i["name"]: i["default"] for i in v["args"]},
|
||||||
|
"handles": old_action_configs.get(f"auto-{k}", {}).get("handles", []),
|
||||||
|
"placeholder_keys": {
|
||||||
|
i["name"]: (
|
||||||
|
"unilabos_resources"
|
||||||
|
if i["type"] == "unilabos.registry.placeholder_type:ResourceSlot"
|
||||||
|
or i["type"] == ("list", "unilabos.registry.placeholder_type:ResourceSlot")
|
||||||
|
else "unilabos_devices"
|
||||||
|
)
|
||||||
|
for i in v["args"]
|
||||||
|
if i.get("type", "")
|
||||||
|
in [
|
||||||
|
"unilabos.registry.placeholder_type:ResourceSlot",
|
||||||
|
"unilabos.registry.placeholder_type:DeviceSlot",
|
||||||
|
("list", "unilabos.registry.placeholder_type:ResourceSlot"),
|
||||||
|
("list", "unilabos.registry.placeholder_type:DeviceSlot"),
|
||||||
|
]
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for k, v in enhanced_info["action_methods"].items()
|
||||||
|
if k not in device_config["class"]["action_value_mappings"]
|
||||||
|
}
|
||||||
|
)
|
||||||
|
for action_name, old_config in old_action_configs.items():
|
||||||
|
if action_name in device_config["class"]["action_value_mappings"]:
|
||||||
|
old_schema = old_config.get("schema", {})
|
||||||
|
if "description" in old_schema and old_schema["description"]:
|
||||||
|
device_config["class"]["action_value_mappings"][action_name]["schema"][
|
||||||
|
"description"
|
||||||
|
] = old_schema["description"]
|
||||||
|
device_config["init_param_schema"] = {}
|
||||||
|
device_config["init_param_schema"]["config"] = self._generate_unilab_json_command_schema(
|
||||||
|
enhanced_info["init_params"], "__init__"
|
||||||
|
)["properties"]["goal"]
|
||||||
|
device_config["init_param_schema"]["data"] = self._generate_status_types_schema(
|
||||||
|
enhanced_info["status_methods"]
|
||||||
|
)
|
||||||
|
|
||||||
|
device_config.pop("schema", None)
|
||||||
|
device_config["class"]["action_value_mappings"] = dict(
|
||||||
|
sorted(device_config["class"]["action_value_mappings"].items())
|
||||||
|
)
|
||||||
|
for action_name, action_config in device_config["class"]["action_value_mappings"].items():
|
||||||
|
if "handles" not in action_config:
|
||||||
|
action_config["handles"] = {}
|
||||||
|
elif isinstance(action_config["handles"], list):
|
||||||
|
if len(action_config["handles"]):
|
||||||
|
logger.error(f"设备{device_id} {action_name} 的handles配置错误,应该是字典类型")
|
||||||
|
continue
|
||||||
|
else:
|
||||||
|
action_config["handles"] = {}
|
||||||
|
if "type" in action_config:
|
||||||
|
action_type_str: str = action_config["type"]
|
||||||
|
if not action_type_str.startswith("UniLabJsonCommand"):
|
||||||
|
try:
|
||||||
|
target_type = self._replace_type_with_class(
|
||||||
|
action_type_str, device_id, f"动作 {action_name}"
|
||||||
|
)
|
||||||
|
except ROSMsgNotFound:
|
||||||
|
continue
|
||||||
|
action_str_type_mapping[action_type_str] = target_type
|
||||||
|
if target_type is not None:
|
||||||
|
action_config["goal_default"] = yaml.safe_load(
|
||||||
|
io.StringIO(get_yaml_from_goal_type(target_type.Goal))
|
||||||
|
)
|
||||||
|
action_config["schema"] = ros_action_to_json_schema(target_type)
|
||||||
|
else:
|
||||||
|
logger.warning(
|
||||||
|
f"[UniLab Registry] 设备 {device_id} 的动作 {action_name} 类型为空,跳过替换"
|
||||||
|
)
|
||||||
|
complete_data[device_id] = copy.deepcopy(dict(sorted(device_config.items())))
|
||||||
|
for status_name, status_type in device_config["class"]["status_types"].items():
|
||||||
|
device_config["class"]["status_types"][status_name] = status_str_type_mapping[status_type]
|
||||||
|
for action_name, action_config in device_config["class"]["action_value_mappings"].items():
|
||||||
|
if action_config["type"] not in action_str_type_mapping:
|
||||||
|
continue
|
||||||
|
action_config["type"] = action_str_type_mapping[action_config["type"]]
|
||||||
|
self._add_builtin_actions(device_config, device_id)
|
||||||
|
device_config["file_path"] = str(file.absolute()).replace("\\", "/")
|
||||||
|
device_config["registry_type"] = "device"
|
||||||
|
device_ids.append(device_id)
|
||||||
|
|
||||||
|
complete_data = dict(sorted(complete_data.items()))
|
||||||
|
complete_data = copy.deepcopy(complete_data)
|
||||||
|
try:
|
||||||
|
with open(file, "w", encoding="utf-8") as f:
|
||||||
|
yaml.dump(complete_data, f, allow_unicode=True, default_flow_style=False, Dumper=NoAliasDumper)
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"[UniLab Registry] 写入设备文件失败: {file}, 错误: {e}")
|
||||||
|
|
||||||
|
return data, complete_data, True, device_ids
|
||||||
|
|
||||||
def load_device_types(self, path: os.PathLike, complete_registry: bool):
|
def load_device_types(self, path: os.PathLike, complete_registry: bool):
|
||||||
# return
|
|
||||||
abs_path = Path(path).absolute()
|
abs_path = Path(path).absolute()
|
||||||
devices_path = abs_path / "devices"
|
devices_path = abs_path / "devices"
|
||||||
device_comms_path = abs_path / "device_comms"
|
device_comms_path = abs_path / "device_comms"
|
||||||
files = list(devices_path.glob("*.yaml")) + list(device_comms_path.glob("*.yaml"))
|
files = list(devices_path.glob("*.yaml")) + list(device_comms_path.glob("*.yaml"))
|
||||||
logger.trace( # type: ignore
|
logger.trace(
|
||||||
f"[UniLab Registry] devices: {devices_path.exists()}, device_comms: {device_comms_path.exists()}, "
|
f"[UniLab Registry] devices: {devices_path.exists()}, device_comms: {device_comms_path.exists()}, "
|
||||||
+ f"total: {len(files)}"
|
+ f"total: {len(files)}"
|
||||||
)
|
)
|
||||||
current_device_number = len(self.device_type_registry) + 1
|
|
||||||
|
if not files:
|
||||||
|
return
|
||||||
|
|
||||||
from unilabos.app.web.utils.action_utils import get_yaml_from_goal_type
|
from unilabos.app.web.utils.action_utils import get_yaml_from_goal_type
|
||||||
|
|
||||||
for i, file in enumerate(files):
|
# 使用线程池并行加载
|
||||||
with open(file, encoding="utf-8", mode="r") as f:
|
max_workers = min(8, len(files))
|
||||||
data = yaml.safe_load(io.StringIO(f.read()))
|
results = []
|
||||||
complete_data = {}
|
|
||||||
action_str_type_mapping = {
|
with ThreadPoolExecutor(max_workers=max_workers) as executor:
|
||||||
"UniLabJsonCommand": "UniLabJsonCommand",
|
future_to_file = {
|
||||||
"UniLabJsonCommandAsync": "UniLabJsonCommandAsync",
|
executor.submit(self._load_single_device_file, file, complete_registry, get_yaml_from_goal_type): file
|
||||||
|
for file in files
|
||||||
}
|
}
|
||||||
status_str_type_mapping = {}
|
for future in as_completed(future_to_file):
|
||||||
if data:
|
file = future_to_file[future]
|
||||||
# 在添加到注册表前处理类型替换
|
try:
|
||||||
for device_id, device_config in data.items():
|
data, complete_data, is_valid, device_ids = future.result()
|
||||||
# 添加文件路径信息 - 使用规范化的完整文件路径
|
if is_valid:
|
||||||
if "version" not in device_config:
|
results.append((file, data, device_ids))
|
||||||
device_config["version"] = "1.0.0"
|
except Exception as e:
|
||||||
if "category" not in device_config:
|
logger.warning(f"[UniLab Registry] 处理设备文件异常: {file}, 错误: {e}")
|
||||||
device_config["category"] = [file.stem]
|
|
||||||
elif file.stem not in device_config["category"]:
|
|
||||||
device_config["category"].append(file.stem)
|
|
||||||
if "config_info" not in device_config:
|
|
||||||
device_config["config_info"] = []
|
|
||||||
if "description" not in device_config:
|
|
||||||
device_config["description"] = ""
|
|
||||||
if "icon" not in device_config:
|
|
||||||
device_config["icon"] = ""
|
|
||||||
if "handles" not in device_config:
|
|
||||||
device_config["handles"] = []
|
|
||||||
if "init_param_schema" not in device_config:
|
|
||||||
device_config["init_param_schema"] = {}
|
|
||||||
if "class" in device_config:
|
|
||||||
if (
|
|
||||||
"status_types" not in device_config["class"]
|
|
||||||
or device_config["class"]["status_types"] is None
|
|
||||||
):
|
|
||||||
device_config["class"]["status_types"] = {}
|
|
||||||
if (
|
|
||||||
"action_value_mappings" not in device_config["class"]
|
|
||||||
or device_config["class"]["action_value_mappings"] is None
|
|
||||||
):
|
|
||||||
device_config["class"]["action_value_mappings"] = {}
|
|
||||||
enhanced_info = {}
|
|
||||||
if complete_registry:
|
|
||||||
device_config["class"]["status_types"].clear()
|
|
||||||
enhanced_info = get_enhanced_class_info(device_config["class"]["module"], use_dynamic=True)
|
|
||||||
if not enhanced_info.get("dynamic_import_success", False):
|
|
||||||
continue
|
|
||||||
device_config["class"]["status_types"].update(
|
|
||||||
{k: v["return_type"] for k, v in enhanced_info["status_methods"].items()}
|
|
||||||
)
|
|
||||||
for status_name, status_type in device_config["class"]["status_types"].items():
|
|
||||||
if isinstance(status_type, tuple) or status_type in ["Any", "None", "Unknown"]:
|
|
||||||
status_type = "String" # 替换成ROS的String,便于显示
|
|
||||||
device_config["class"]["status_types"][status_name] = status_type
|
|
||||||
try:
|
|
||||||
target_type = self._replace_type_with_class(
|
|
||||||
status_type, device_id, f"状态 {status_name}"
|
|
||||||
)
|
|
||||||
except ROSMsgNotFound:
|
|
||||||
continue
|
|
||||||
if target_type in [
|
|
||||||
dict,
|
|
||||||
list,
|
|
||||||
]: # 对于嵌套类型返回的对象,暂时处理成字符串,无法直接进行转换
|
|
||||||
target_type = String
|
|
||||||
status_str_type_mapping[status_type] = target_type
|
|
||||||
device_config["class"]["status_types"] = dict(
|
|
||||||
sorted(device_config["class"]["status_types"].items())
|
|
||||||
)
|
|
||||||
if complete_registry:
|
|
||||||
# 保存原有的description信息
|
|
||||||
old_descriptions = {}
|
|
||||||
for action_name, action_config in device_config["class"]["action_value_mappings"].items():
|
|
||||||
if "description" in action_config.get("schema", {}):
|
|
||||||
description = action_config["schema"]["description"]
|
|
||||||
if len(description):
|
|
||||||
old_descriptions[action_name] = action_config["schema"]["description"]
|
|
||||||
|
|
||||||
device_config["class"]["action_value_mappings"] = {
|
# 线程安全地更新注册表
|
||||||
k: v
|
current_device_number = len(self.device_type_registry) + 1
|
||||||
for k, v in device_config["class"]["action_value_mappings"].items()
|
with self._registry_lock:
|
||||||
if not k.startswith("auto-")
|
for file, data, device_ids in results:
|
||||||
}
|
self.device_type_registry.update(data)
|
||||||
# 处理动作值映射
|
for device_id in device_ids:
|
||||||
device_config["class"]["action_value_mappings"].update(
|
logger.trace(
|
||||||
{
|
f"[UniLab Registry] Device-{current_device_number} Add {device_id} "
|
||||||
f"auto-{k}": {
|
|
||||||
"type": "UniLabJsonCommandAsync" if v["is_async"] else "UniLabJsonCommand",
|
|
||||||
"goal": {},
|
|
||||||
"feedback": {},
|
|
||||||
"result": {},
|
|
||||||
"schema": self._generate_unilab_json_command_schema(
|
|
||||||
v["args"], k, v.get("return_annotation")
|
|
||||||
),
|
|
||||||
"goal_default": {i["name"]: i["default"] for i in v["args"]},
|
|
||||||
"handles": [],
|
|
||||||
"placeholder_keys": {
|
|
||||||
i["name"]: (
|
|
||||||
"unilabos_resources"
|
|
||||||
if i["type"] == "unilabos.registry.placeholder_type:ResourceSlot"
|
|
||||||
or i["type"]
|
|
||||||
== ("list", "unilabos.registry.placeholder_type:ResourceSlot")
|
|
||||||
else "unilabos_devices"
|
|
||||||
)
|
|
||||||
for i in v["args"]
|
|
||||||
if i.get("type", "")
|
|
||||||
in [
|
|
||||||
"unilabos.registry.placeholder_type:ResourceSlot",
|
|
||||||
"unilabos.registry.placeholder_type:DeviceSlot",
|
|
||||||
("list", "unilabos.registry.placeholder_type:ResourceSlot"),
|
|
||||||
("list", "unilabos.registry.placeholder_type:DeviceSlot"),
|
|
||||||
]
|
|
||||||
},
|
|
||||||
}
|
|
||||||
# 不生成已配置action的动作
|
|
||||||
for k, v in enhanced_info["action_methods"].items()
|
|
||||||
if k not in device_config["class"]["action_value_mappings"]
|
|
||||||
}
|
|
||||||
)
|
|
||||||
# 恢复原有的description信息(auto开头的不修改)
|
|
||||||
for action_name, description in old_descriptions.items():
|
|
||||||
if action_name in device_config["class"]["action_value_mappings"]: # 有一些会被删除
|
|
||||||
device_config["class"]["action_value_mappings"][action_name]["schema"][
|
|
||||||
"description"
|
|
||||||
] = description
|
|
||||||
device_config["init_param_schema"] = {}
|
|
||||||
device_config["init_param_schema"]["config"] = self._generate_unilab_json_command_schema(
|
|
||||||
enhanced_info["init_params"], "__init__"
|
|
||||||
)["properties"]["goal"]
|
|
||||||
device_config["init_param_schema"]["data"] = self._generate_status_types_schema(
|
|
||||||
enhanced_info["status_methods"]
|
|
||||||
)
|
|
||||||
|
|
||||||
device_config.pop("schema", None)
|
|
||||||
device_config["class"]["action_value_mappings"] = dict(
|
|
||||||
sorted(device_config["class"]["action_value_mappings"].items())
|
|
||||||
)
|
|
||||||
for action_name, action_config in device_config["class"]["action_value_mappings"].items():
|
|
||||||
if "handles" not in action_config:
|
|
||||||
action_config["handles"] = {}
|
|
||||||
elif isinstance(action_config["handles"], list):
|
|
||||||
if len(action_config["handles"]):
|
|
||||||
logger.error(f"设备{device_id} {action_name} 的handles配置错误,应该是字典类型")
|
|
||||||
continue
|
|
||||||
else:
|
|
||||||
action_config["handles"] = {}
|
|
||||||
if "type" in action_config:
|
|
||||||
action_type_str: str = action_config["type"]
|
|
||||||
# 通过Json发放指令,而不是通过特殊的ros action进行处理
|
|
||||||
if not action_type_str.startswith("UniLabJsonCommand"):
|
|
||||||
try:
|
|
||||||
target_type = self._replace_type_with_class(
|
|
||||||
action_type_str, device_id, f"动作 {action_name}"
|
|
||||||
)
|
|
||||||
except ROSMsgNotFound:
|
|
||||||
continue
|
|
||||||
action_str_type_mapping[action_type_str] = target_type
|
|
||||||
if target_type is not None:
|
|
||||||
action_config["goal_default"] = yaml.safe_load(
|
|
||||||
io.StringIO(get_yaml_from_goal_type(target_type.Goal))
|
|
||||||
)
|
|
||||||
action_config["schema"] = ros_action_to_json_schema(target_type)
|
|
||||||
else:
|
|
||||||
logger.warning(
|
|
||||||
f"[UniLab Registry] 设备 {device_id} 的动作 {action_name} 类型为空,跳过替换"
|
|
||||||
)
|
|
||||||
complete_data[device_id] = copy.deepcopy(dict(sorted(device_config.items()))) # 稍后dump到文件
|
|
||||||
for status_name, status_type in device_config["class"]["status_types"].items():
|
|
||||||
device_config["class"]["status_types"][status_name] = status_str_type_mapping[status_type]
|
|
||||||
for action_name, action_config in device_config["class"]["action_value_mappings"].items():
|
|
||||||
if action_config["type"] not in action_str_type_mapping:
|
|
||||||
continue
|
|
||||||
action_config["type"] = action_str_type_mapping[action_config["type"]]
|
|
||||||
# 添加内置的驱动命令动作
|
|
||||||
self._add_builtin_actions(device_config, device_id)
|
|
||||||
device_config["file_path"] = str(file.absolute()).replace("\\", "/")
|
|
||||||
device_config["registry_type"] = "device"
|
|
||||||
logger.trace( # type: ignore
|
|
||||||
f"[UniLab Registry] Device-{current_device_number} File-{i+1}/{len(files)} Add {device_id} "
|
|
||||||
+ f"[{data[device_id].get('name', '未命名设备')}]"
|
+ f"[{data[device_id].get('name', '未命名设备')}]"
|
||||||
)
|
)
|
||||||
current_device_number += 1
|
current_device_number += 1
|
||||||
complete_data = dict(sorted(complete_data.items()))
|
|
||||||
complete_data = copy.deepcopy(complete_data)
|
# 记录无效文件
|
||||||
with open(file, "w", encoding="utf-8") as f:
|
valid_files = {r[0] for r in results}
|
||||||
yaml.dump(complete_data, f, allow_unicode=True, default_flow_style=False, Dumper=NoAliasDumper)
|
for file in files:
|
||||||
self.device_type_registry.update(data)
|
if file not in valid_files:
|
||||||
else:
|
logger.debug(f"[UniLab Registry] Device File Not Valid YAML File: {file.absolute()}")
|
||||||
logger.debug(
|
|
||||||
f"[UniLab Registry] Device File-{i+1}/{len(files)} Not Valid YAML File: {file.absolute()}"
|
|
||||||
)
|
|
||||||
|
|
||||||
def obtain_registry_device_info(self):
|
def obtain_registry_device_info(self):
|
||||||
devices = []
|
devices = []
|
||||||
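The device-loading hunk above auto-generates `auto-` prefixed actions and classifies `ResourceSlot`/`DeviceSlot` typed arguments into `placeholder_keys` (`unilabos_resources` vs `unilabos_devices`). The helper below is a reduced, hypothetical rendition of that classification; the arg dicts mirror the registry's collected method info but the function itself is only illustrative.

# Reduced sketch of the placeholder classification for auto-generated actions.
RESOURCE_SLOT = "unilabos.registry.placeholder_type:ResourceSlot"
DEVICE_SLOT = "unilabos.registry.placeholder_type:DeviceSlot"
SLOT_TYPES = {RESOURCE_SLOT, DEVICE_SLOT, ("list", RESOURCE_SLOT), ("list", DEVICE_SLOT)}

def placeholder_keys(args: list[dict]) -> dict:
    keys = {}
    for arg in args:
        arg_type = arg.get("type", "")
        if arg_type not in SLOT_TYPES:
            continue
        is_resource = arg_type in (RESOURCE_SLOT, ("list", RESOURCE_SLOT))
        keys[arg["name"]] = "unilabos_resources" if is_resource else "unilabos_devices"
    return keys

args = [
    {"name": "plate", "type": RESOURCE_SLOT},
    {"name": "arm", "type": DEVICE_SLOT},
    {"name": "speed", "type": "float"},
]
print(placeholder_keys(args))  # {'plate': 'unilabos_resources', 'arm': 'unilabos_devices'}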
|
|||||||
@@ -151,12 +151,40 @@ def canonicalize_links_ports(links: List[Dict[str, Any]], resource_tree_set: Res
     """
     # 构建 id 到 uuid 的映射
     id_to_uuid: Dict[str, str] = {}
+    uuid_to_id: Dict[str, str] = {}
     for node in resource_tree_set.all_nodes:
         id_to_uuid[node.res_content.id] = node.res_content.uuid
+        uuid_to_id[node.res_content.uuid] = node.res_content.id
+
+    # 第三遍处理:为每个 link 添加 source_uuid 和 target_uuid
+    for link in links:
+        source_id = link.get("source")
+        target_id = link.get("target")
+
+        # 添加 source_uuid
+        if source_id and source_id in id_to_uuid:
+            link["source_uuid"] = id_to_uuid[source_id]
+
+        # 添加 target_uuid
+        if target_id and target_id in id_to_uuid:
+            link["target_uuid"] = id_to_uuid[target_id]
+
+        source_uuid = link.get("source_uuid")
+        target_uuid = link.get("target_uuid")
+
+        # 添加 source_uuid
+        if source_uuid and source_uuid in uuid_to_id:
+            link["source"] = uuid_to_id[source_uuid]
+
+        # 添加 target_uuid
+        if target_uuid and target_uuid in uuid_to_id:
+            link["target"] = uuid_to_id[target_uuid]
 
     # 第一遍处理:将字符串类型的port转换为字典格式
     for link in links:
         port = link.get("port")
+        if port is None:
+            continue
         if link.get("type", "physical") == "physical":
             link["type"] = "fluid"
         if isinstance(port, int):
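The hunk above keeps the id view and the uuid view of each link in sync: links that arrive with ids gain `source_uuid`/`target_uuid`, and links that arrive with uuids gain the matching ids. The toy example below demonstrates the same two-way fill-in with plain dicts instead of `ResourceTreeSet` nodes.

# Toy demonstration of the two-way id/uuid fill-in on link dicts.
nodes = [{"id": "pump_1", "uuid": "a1"}, {"id": "reactor_1", "uuid": "b2"}]
id_to_uuid = {n["id"]: n["uuid"] for n in nodes}
uuid_to_id = {n["uuid"]: n["id"] for n in nodes}

links = [
    {"source": "pump_1", "target": "reactor_1"},   # ids only
    {"source_uuid": "b2", "target_uuid": "a1"},    # uuids only
]
for link in links:
    for end in ("source", "target"):
        if link.get(end) in id_to_uuid:
            link[f"{end}_uuid"] = id_to_uuid[link[end]]
        if link.get(f"{end}_uuid") in uuid_to_id:
            link[end] = uuid_to_id[link[f"{end}_uuid"]]

print(links)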
@@ -179,13 +207,15 @@ def canonicalize_links_ports(links: List[Dict[str, Any]], resource_tree_set: Res
|
|||||||
link["port"] = {link["source"]: None, link["target"]: None}
|
link["port"] = {link["source"]: None, link["target"]: None}
|
||||||
|
|
||||||
# 构建边字典,键为(source节点, target节点),值为对应的port信息
|
# 构建边字典,键为(source节点, target节点),值为对应的port信息
|
||||||
edges = {(link["source"], link["target"]): link["port"] for link in links}
|
edges = {(link["source"], link["target"]): link["port"] for link in links if link.get("port")}
|
||||||
|
|
||||||
# 第二遍处理:填充反向边的dest信息
|
# 第二遍处理:填充反向边的dest信息
|
||||||
delete_reverses = []
|
delete_reverses = []
|
||||||
for i, link in enumerate(links):
|
for i, link in enumerate(links):
|
||||||
s, t = link["source"], link["target"]
|
s, t = link["source"], link["target"]
|
||||||
current_port = link["port"]
|
current_port = link.get("port")
|
||||||
|
if current_port is None:
|
||||||
|
continue
|
||||||
if current_port.get(t) is None:
|
if current_port.get(t) is None:
|
||||||
reverse_key = (t, s)
|
reverse_key = (t, s)
|
||||||
reverse_port = edges.get(reverse_key)
|
reverse_port = edges.get(reverse_key)
|
||||||
@@ -200,20 +230,6 @@ def canonicalize_links_ports(links: List[Dict[str, Any]], resource_tree_set: Res
|
|||||||
current_port[t] = current_port[s]
|
current_port[t] = current_port[s]
|
||||||
# 删除已被使用反向端口信息的反向边
|
# 删除已被使用反向端口信息的反向边
|
||||||
standardized_links = [link for i, link in enumerate(links) if i not in delete_reverses]
|
standardized_links = [link for i, link in enumerate(links) if i not in delete_reverses]
|
||||||
|
|
||||||
# 第三遍处理:为每个 link 添加 source_uuid 和 target_uuid
|
|
||||||
for link in standardized_links:
|
|
||||||
source_id = link.get("source")
|
|
||||||
target_id = link.get("target")
|
|
||||||
|
|
||||||
# 添加 source_uuid
|
|
||||||
if source_id and source_id in id_to_uuid:
|
|
||||||
link["source_uuid"] = id_to_uuid[source_id]
|
|
||||||
|
|
||||||
# 添加 target_uuid
|
|
||||||
if target_id and target_id in id_to_uuid:
|
|
||||||
link["target_uuid"] = id_to_uuid[target_id]
|
|
||||||
|
|
||||||
return standardized_links
|
return standardized_links
|
||||||
|
|
||||||
|
|
||||||
@@ -260,7 +276,7 @@ def read_node_link_json(
|
|||||||
resource_tree_set = canonicalize_nodes_data(nodes)
|
resource_tree_set = canonicalize_nodes_data(nodes)
|
||||||
|
|
||||||
# 标准化边数据
|
# 标准化边数据
|
||||||
links = data.get("links", [])
|
links = data.get("links", data.get("edges", []))
|
||||||
standardized_links = canonicalize_links_ports(links, resource_tree_set)
|
standardized_links = canonicalize_links_ports(links, resource_tree_set)
|
||||||
|
|
||||||
# 构建 NetworkX 图(需要转换回 dict 格式)
|
# 构建 NetworkX 图(需要转换回 dict 格式)
|
||||||
@@ -284,6 +300,8 @@ def modify_to_backend_format(data: list[dict[str, Any]]) -> list[dict[str, Any]]
|
|||||||
edge["sourceHandle"] = port[source]
|
edge["sourceHandle"] = port[source]
|
||||||
elif "source_port" in edge:
|
elif "source_port" in edge:
|
||||||
edge["sourceHandle"] = edge.pop("source_port")
|
edge["sourceHandle"] = edge.pop("source_port")
|
||||||
|
elif "source_handle" in edge:
|
||||||
|
edge["sourceHandle"] = edge.pop("source_handle")
|
||||||
else:
|
else:
|
||||||
typ = edge.get("type")
|
typ = edge.get("type")
|
||||||
if typ == "communication":
|
if typ == "communication":
|
||||||
@@ -292,6 +310,8 @@ def modify_to_backend_format(data: list[dict[str, Any]]) -> list[dict[str, Any]]
|
|||||||
edge["targetHandle"] = port[target]
|
edge["targetHandle"] = port[target]
|
||||||
elif "target_port" in edge:
|
elif "target_port" in edge:
|
||||||
edge["targetHandle"] = edge.pop("target_port")
|
edge["targetHandle"] = edge.pop("target_port")
|
||||||
|
elif "target_handle" in edge:
|
||||||
|
edge["targetHandle"] = edge.pop("target_handle")
|
||||||
else:
|
else:
|
||||||
typ = edge.get("type")
|
typ = edge.get("type")
|
||||||
if typ == "communication":
|
if typ == "communication":
|
||||||
@@ -597,6 +617,8 @@ def resource_plr_to_ulab(resource_plr: "ResourcePLR", parent_name: str = None, w
|
|||||||
"tube": "tube",
|
"tube": "tube",
|
||||||
"bottle_carrier": "bottle_carrier",
|
"bottle_carrier": "bottle_carrier",
|
||||||
"plate_adapter": "plate_adapter",
|
"plate_adapter": "plate_adapter",
|
||||||
|
"electrode_sheet": "electrode_sheet",
|
||||||
|
"material_hole": "material_hole",
|
||||||
}
|
}
|
||||||
if source in replace_info:
|
if source in replace_info:
|
||||||
return replace_info[source]
|
return replace_info[source]
|
||||||
|
|||||||
@@ -5,6 +5,8 @@ from pydantic import BaseModel, field_serializer, field_validator, ValidationErr
 from pydantic import Field
 from typing import List, Tuple, Any, Dict, Literal, Optional, cast, TYPE_CHECKING, Union
+
+from typing_extensions import TypedDict
 
 from unilabos.resources.plr_additional_res_reg import register
 from unilabos.utils.log import logger
 
@@ -13,6 +15,29 @@ if TYPE_CHECKING:
     from pylabrobot.resources import Resource as PLRResource
 
 
+EXTRA_CLASS = "unilabos_resource_class"
+EXTRA_SAMPLE_UUID = "sample_uuid"
+EXTRA_UNILABOS_SAMPLE_UUID = "unilabos_sample_uuid"
+
+# 函数参数名常量 - 用于自动注入 sample_uuids 列表
+PARAM_SAMPLE_UUIDS = "sample_uuids"
+
+# JSON Command 中的系统参数字段名
+JSON_UNILABOS_PARAM = "unilabos_param"
+
+# 返回值中的 samples 字段名
+RETURN_UNILABOS_SAMPLES = "unilabos_samples"
+
+# sample_uuids 参数类型 (用于 virtual bench 等设备添加 sample_uuids 参数)
+SampleUUIDsType = Dict[str, Optional["PLRResource"]]
+
+
+class LabSample(TypedDict):
+    sample_uuid: str
+    oss_path: str
+    extra: Dict[str, Any]
+
+
 class ResourceDictPositionSize(BaseModel):
     depth: float = Field(description="Depth", default=0.0)  # z
     width: float = Field(description="Width", default=0.0)  # x
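The new `LabSample` TypedDict and the `unilabos_samples` field name tie device action results to the `StartHeatingResult` schema seen in the registry YAML earlier. Below is a sketch of a result payload built in that shape; the uuid and oss_path values are placeholders and the function is illustrative rather than part of the virtual workbench driver.

# Sketch only: build a result dict in the StartHeatingResult layout.
from typing import Any, Dict, List, TypedDict

class LabSample(TypedDict):
    sample_uuid: str
    oss_path: str
    extra: Dict[str, Any]

def start_heating_result(station_id: int, material_number: int, ok: bool) -> Dict[str, Any]:
    samples: List[LabSample] = [
        {"sample_uuid": "sample-0001", "oss_path": "oss://bucket/sample-0001.json", "extra": {}}
    ]
    return {
        "success": ok,
        "station_id": station_id,
        "material_id": f"M{material_number:04d}",
        "material_number": material_number,
        "message": "heating started" if ok else "station busy",
        "unilabos_samples": samples,
    }

print(start_heating_result(1, 7, True))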
@@ -393,7 +418,7 @@ class ResourceTreeSet(object):
|
|||||||
"parent": parent_resource, # 直接传入 ResourceDict 对象
|
"parent": parent_resource, # 直接传入 ResourceDict 对象
|
||||||
"parent_uuid": parent_uuid, # 使用 parent_uuid 而不是 parent 对象
|
"parent_uuid": parent_uuid, # 使用 parent_uuid 而不是 parent 对象
|
||||||
"type": replace_plr_type(d.get("category", "")),
|
"type": replace_plr_type(d.get("category", "")),
|
||||||
"class": d.get("class", ""),
|
"class": extra.get(EXTRA_CLASS, ""),
|
||||||
"position": pos,
|
"position": pos,
|
||||||
"pose": pos,
|
"pose": pos,
|
||||||
"config": {
|
"config": {
|
||||||
@@ -443,7 +468,7 @@ class ResourceTreeSet(object):
|
|||||||
trees.append(tree_instance)
|
trees.append(tree_instance)
|
||||||
return cls(trees)
|
return cls(trees)
|
||||||
|
|
||||||
def to_plr_resources(self) -> List["PLRResource"]:
|
def to_plr_resources(self, skip_devices=True) -> List["PLRResource"]:
|
||||||
"""
|
"""
|
||||||
将 ResourceTreeSet 转换为 PLR 资源列表
|
将 ResourceTreeSet 转换为 PLR 资源列表
|
||||||
|
|
||||||
@@ -468,6 +493,7 @@ class ResourceTreeSet(object):
|
|||||||
name_to_uuid[node.res_content.name] = node.res_content.uuid
|
name_to_uuid[node.res_content.name] = node.res_content.uuid
|
||||||
all_states[node.res_content.name] = node.res_content.data
|
all_states[node.res_content.name] = node.res_content.data
|
||||||
name_to_extra[node.res_content.name] = node.res_content.extra
|
name_to_extra[node.res_content.name] = node.res_content.extra
|
||||||
|
name_to_extra[node.res_content.name][EXTRA_CLASS] = node.res_content.klass
|
||||||
for child in node.children:
|
for child in node.children:
|
||||||
collect_node_data(child, name_to_uuid, all_states, name_to_extra)
|
collect_node_data(child, name_to_uuid, all_states, name_to_extra)
|
||||||
|
|
||||||
@@ -512,7 +538,10 @@ class ResourceTreeSet(object):
|
|||||||
plr_dict = node_to_plr_dict(tree.root_node, has_model)
|
plr_dict = node_to_plr_dict(tree.root_node, has_model)
|
||||||
try:
|
try:
|
||||||
sub_cls = find_subclass(plr_dict["type"], PLRResource)
|
sub_cls = find_subclass(plr_dict["type"], PLRResource)
|
||||||
if sub_cls is None:
|
if skip_devices and plr_dict["type"] == "device":
|
||||||
|
logger.info(f"跳过更新 {plr_dict['name']} 设备是class")
|
||||||
|
continue
|
||||||
|
elif sub_cls is None:
|
||||||
raise ValueError(
|
raise ValueError(
|
||||||
f"无法找到类型 {plr_dict['type']} 对应的 PLR 资源类。原始信息:{tree.root_node.res_content}"
|
f"无法找到类型 {plr_dict['type']} 对应的 PLR 资源类。原始信息:{tree.root_node.res_content}"
|
||||||
)
|
)
|
||||||
@@ -520,6 +549,11 @@ class ResourceTreeSet(object):
|
|||||||
if "category" not in spec.parameters:
|
if "category" not in spec.parameters:
|
||||||
plr_dict.pop("category", None)
|
plr_dict.pop("category", None)
|
||||||
plr_resource = sub_cls.deserialize(plr_dict, allow_marshal=True)
|
plr_resource = sub_cls.deserialize(plr_dict, allow_marshal=True)
|
||||||
|
from pylabrobot.resources import Coordinate
|
||||||
|
from pylabrobot.serializer import deserialize
|
||||||
|
|
||||||
|
location = cast(Coordinate, deserialize(plr_dict["location"]))
|
||||||
|
plr_resource.location = location
|
||||||
plr_resource.load_all_state(all_states)
|
plr_resource.load_all_state(all_states)
|
||||||
# 使用 DeviceNodeResourceTracker 设置 UUID 和 Extra
|
# 使用 DeviceNodeResourceTracker 设置 UUID 和 Extra
|
||||||
tracker.loop_set_uuid(plr_resource, name_to_uuid)
|
tracker.loop_set_uuid(plr_resource, name_to_uuid)
|
||||||
@@ -976,7 +1010,7 @@ class DeviceNodeResourceTracker(object):
|
|||||||
extra = name_to_extra_map[resource_name]
|
extra = name_to_extra_map[resource_name]
|
||||||
self.set_resource_extra(res, extra)
|
self.set_resource_extra(res, extra)
|
||||||
if len(extra):
|
if len(extra):
|
||||||
logger.debug(f"设置资源Extra: {resource_name} -> {extra}")
|
logger.trace(f"设置资源Extra: {resource_name} -> {extra}")
|
||||||
return 1
|
return 1
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
|
|||||||
@@ -361,7 +361,14 @@ def convert_to_ros_msg(ros_msg_type: Union[Type, Any], obj: Any) -> Any:
         if hasattr(ros_msg, key):
             attr = getattr(ros_msg, key)
             if isinstance(attr, (float, int, str, bool)):
-                setattr(ros_msg, key, type(attr)(value))
+                # 处理list类型的值,取第一个元素或抛出错误
+                if isinstance(value, list):
+                    if len(value) > 0:
+                        setattr(ros_msg, key, type(attr)(value[0]))
+                    else:
+                        setattr(ros_msg, key, type(attr)())  # 使用默认值
+                else:
+                    setattr(ros_msg, key, type(attr)(value))
             elif isinstance(attr, (list, tuple)) and isinstance(value, Iterable):
                 td = ros_msg.SLOT_TYPES[ind].value_type
                 if isinstance(td, NamespacedType):
@@ -374,9 +381,35 @@ def convert_to_ros_msg(ros_msg_type: Union[Type, Any], obj: Any) -> Any:
                     setattr(ros_msg, key, [])  # FIXME
             elif "array.array" in str(type(attr)):
                 if attr.typecode == "f" or attr.typecode == "d":
+                    # 如果是单个值,转换为列表
+                    if value is None:
+                        value = []
+                    elif not isinstance(value, Iterable) or isinstance(value, (str, bytes)):
+                        value = [value]
                     setattr(ros_msg, key, [float(i) for i in value])
                 else:
-                    setattr(ros_msg, key, value)
+                    # 对于整数数组,需要确保是序列且每个值在有效范围内
+                    if value is None:
+                        value = []
+                    elif not isinstance(value, Iterable) or isinstance(value, (str, bytes)):
+                        # 如果是单个值,转换为列表
+                        value = [value]
+                    # 确保每个整数值在有效范围内(-2147483648 到 2147483647)
+                    converted_value = []
+                    for i in value:
+                        if i is None:
+                            continue  # 跳过 None 值
+                        if isinstance(i, (int, float)):
+                            int_val = int(i)
+                            # 确保在 int32 范围内
+                            if int_val < -2147483648:
+                                int_val = -2147483648
+                            elif int_val > 2147483647:
+                                int_val = 2147483647
+                            converted_value.append(int_val)
+                        else:
+                            converted_value.append(i)
+                    setattr(ros_msg, key, converted_value)
             else:
                 nested_ros_msg = convert_to_ros_msg(type(attr)(), value)
                 setattr(ros_msg, key, nested_ros_msg)
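The hunks above normalize incoming values before they are written into ROS message fields: scalars are wrapped into lists, None becomes an empty list, and integer array entries are clamped to the int32 range. The standalone helper below reproduces that normalization in isolation so the behavior is easy to test; it is a sketch, not the project's converter.

# Standalone sketch of the scalar/None normalization and int32 clamping.
from typing import Any, Iterable, List

INT32_MIN, INT32_MAX = -2147483648, 2147483647

def to_int32_array(value: Any) -> List[int]:
    if value is None:
        items: Iterable[Any] = []
    elif not isinstance(value, Iterable) or isinstance(value, (str, bytes)):
        items = [value]  # single value becomes a one-element list
    else:
        items = value
    out: List[int] = []
    for item in items:
        if item is None:
            continue  # skip None entries
        out.append(max(INT32_MIN, min(INT32_MAX, int(item))))
    return out

print(to_int32_array(7))                    # [7]
print(to_int32_array([1e12, None, -1e12]))  # [2147483647, -2147483648]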
|
|||||||
@@ -4,8 +4,20 @@ import json
|
|||||||
import threading
|
import threading
|
||||||
import time
|
import time
|
||||||
import traceback
|
import traceback
|
||||||
from typing import get_type_hints, TypeVar, Generic, Dict, Any, Type, TypedDict, Optional, List, TYPE_CHECKING, Union, \
|
from typing import (
|
||||||
Tuple
|
get_type_hints,
|
||||||
|
TypeVar,
|
||||||
|
Generic,
|
||||||
|
Dict,
|
||||||
|
Any,
|
||||||
|
Type,
|
||||||
|
TypedDict,
|
||||||
|
Optional,
|
||||||
|
List,
|
||||||
|
TYPE_CHECKING,
|
||||||
|
Union,
|
||||||
|
Tuple,
|
||||||
|
)
|
||||||
|
|
||||||
from concurrent.futures import ThreadPoolExecutor
|
from concurrent.futures import ThreadPoolExecutor
|
||||||
import asyncio
|
import asyncio
|
||||||
@@ -48,8 +60,10 @@ from unilabos.resources.resource_tracker import (
|
|||||||
ResourceTreeSet,
|
ResourceTreeSet,
|
||||||
ResourceTreeInstance,
|
ResourceTreeInstance,
|
||||||
ResourceDictInstance,
|
ResourceDictInstance,
|
||||||
|
EXTRA_SAMPLE_UUID,
|
||||||
|
PARAM_SAMPLE_UUIDS,
|
||||||
|
JSON_UNILABOS_PARAM,
|
||||||
)
|
)
|
||||||
from unilabos.ros.x.rclpyx import get_event_loop
|
|
||||||
from unilabos.ros.utils.driver_creator import WorkstationNodeCreator, PyLabRobotCreator, DeviceClassCreator
|
from unilabos.ros.utils.driver_creator import WorkstationNodeCreator, PyLabRobotCreator, DeviceClassCreator
|
||||||
from rclpy.task import Task, Future
|
from rclpy.task import Task, Future
|
||||||
from unilabos.utils.import_manager import default_manager
|
from unilabos.utils.import_manager import default_manager
|
||||||
@@ -185,7 +199,7 @@ class PropertyPublisher:
|
|||||||
f"创建发布者 {name} 失败,可能由于注册表有误,类型: {msg_type},错误: {ex}\n{traceback.format_exc()}"
|
f"创建发布者 {name} 失败,可能由于注册表有误,类型: {msg_type},错误: {ex}\n{traceback.format_exc()}"
|
||||||
)
|
)
|
||||||
self.timer = node.create_timer(self.timer_period, self.publish_property)
|
self.timer = node.create_timer(self.timer_period, self.publish_property)
|
||||||
-self.__loop = get_event_loop()
+self.__loop = ROS2DeviceNode.get_asyncio_loop()
 str_msg_type = str(msg_type)[8:-2]
 self.node.lab_logger().trace(f"发布属性: {name}, 类型: {str_msg_type}, 周期: {initial_period}秒, QoS: {qos}")

@@ -217,14 +231,15 @@ class PropertyPublisher:

 def publish_property(self):
 try:
-self.node.lab_logger().trace(f"【.publish_property】开始发布属性: {self.name}")
+# self.node.lab_logger().trace(f"【.publish_property】开始发布属性: {self.name}")
 value = self.get_property()
 if self.print_publish:
-self.node.lab_logger().trace(f"【.publish_property】发布 {self.msg_type}: {value}")
+pass
+# self.node.lab_logger().trace(f"【.publish_property】发布 {self.msg_type}: {value}")
 if value is not None:
 msg = convert_to_ros_msg(self.msg_type, value)
 self.publisher_.publish(msg)
-self.node.lab_logger().trace(f"【.publish_property】属性 {self.name} 发布成功")
+# self.node.lab_logger().trace(f"【.publish_property】属性 {self.name} 发布成功")
 except Exception as e:
 self.node.lab_logger().error(
 f"【.publish_property】发布属性 {self.publisher_.topic} 出错: {str(e)}\n{traceback.format_exc()}"

@@ -362,6 +377,7 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 from pylabrobot.resources.deck import Deck
 from pylabrobot.resources import Coordinate
 from pylabrobot.resources import Plate
+
 # 物料传输到对应的node节点
 client = self._resource_clients["c2s_update_resource_tree"]
 request = SerialCommand.Request()

@@ -389,33 +405,27 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 rts: ResourceTreeSet = ResourceTreeSet.from_raw_dict_list(input_resources)
 parent_resource = None
 if bind_parent_id != self.node_name:
-parent_resource = self.resource_tracker.figure_resource(
-{"name": bind_parent_id}
-)
+parent_resource = self.resource_tracker.figure_resource({"name": bind_parent_id})
 for r in rts.root_nodes:
 # noinspection PyUnresolvedReferences
 r.res_content.parent_uuid = parent_resource.unilabos_uuid
 else:
 for r in rts.root_nodes:
 r.res_content.parent_uuid = self.uuid
-if len(LIQUID_INPUT_SLOT) and LIQUID_INPUT_SLOT[0] == -1 and len(rts.root_nodes) == 1 and isinstance(rts.root_nodes[0], RegularContainer):
+rts_plr_instances = rts.to_plr_resources()
+if len(rts.root_nodes) == 1 and isinstance(rts_plr_instances[0], RegularContainer):
 # noinspection PyTypeChecker
-container_instance: RegularContainer = rts.root_nodes[0]
-found_resources = self.resource_tracker.figure_resource(
-{"id": container_instance.name}, try_mode=True
-)
+container_instance: RegularContainer = rts_plr_instances[0]
+found_resources = self.resource_tracker.figure_resource({"name": container_instance.name}, try_mode=True)
 if not len(found_resources):
 self.resource_tracker.add_resource(container_instance)
 logger.info(f"添加物料{container_instance.name}到资源跟踪器")
 else:
-assert (
-len(found_resources) == 1
-), f"找到多个同名物料: {container_instance.name}, 请检查物料系统"
+assert len(found_resources) == 1, f"找到多个同名物料: {container_instance.name}, 请检查物料系统"
 found_resource = found_resources[0]
 if isinstance(found_resource, RegularContainer):
 logger.info(f"更新物料{container_instance.name}的数据{found_resource.state}")
-found_resource.state.update(json.loads(container_instance.state))
+found_resource.state.update(container_instance.state)
 elif isinstance(found_resource, dict):
 raise ValueError("已不支持 字典 版本的RegularContainer")
 else:

@@ -423,14 +433,16 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 f"更新物料{container_instance.name}出现不支持的数据类型{type(found_resource)} {found_resource}"
 )
 # noinspection PyUnresolvedReferences
-request.command = json.dumps({
-"action": "add",
-"data": {
-"data": rts.dump(),
-"mount_uuid": parent_resource.unilabos_uuid if parent_resource is not None else "",
-"first_add": False,
-},
-})
+request.command = json.dumps(
+{
+"action": "add",
+"data": {
+"data": rts.dump(),
+"mount_uuid": parent_resource.unilabos_uuid if parent_resource is not None else self.uuid,
+"first_add": False,
+},
+}
+)
 tree_response: SerialCommand.Response = await client.call_async(request)
 uuid_maps = json.loads(tree_response.response)
 plr_instances = rts.to_plr_resources()

@@ -472,7 +484,9 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 if len(ADD_LIQUID_TYPE) == 1 and len(LIQUID_VOLUME) == 1 and len(LIQUID_INPUT_SLOT) > 1:
 ADD_LIQUID_TYPE = ADD_LIQUID_TYPE * len(LIQUID_INPUT_SLOT)
 LIQUID_VOLUME = LIQUID_VOLUME * len(LIQUID_INPUT_SLOT)
-self.lab_logger().warning(f"增加液体资源时,数量为1,自动补全为 {len(LIQUID_INPUT_SLOT)} 个")
+self.lab_logger().warning(
+f"增加液体资源时,数量为1,自动补全为 {len(LIQUID_INPUT_SLOT)} 个"
+)
 for liquid_type, liquid_volume, liquid_input_slot in zip(
 ADD_LIQUID_TYPE, LIQUID_VOLUME, LIQUID_INPUT_SLOT
 ):

@@ -491,9 +505,15 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 input_wells = []
 for r in LIQUID_INPUT_SLOT:
 input_wells.append(plr_instance.children[r])
-final_response["liquid_input_resource_tree"] = ResourceTreeSet.from_plr_resources(input_wells).dump()
+final_response["liquid_input_resource_tree"] = ResourceTreeSet.from_plr_resources(
+input_wells
+).dump()
 res.response = json.dumps(final_response)
-if issubclass(parent_resource.__class__, Deck) and hasattr(parent_resource, "assign_child_at_slot") and "slot" in other_calling_param:
+if (
+issubclass(parent_resource.__class__, Deck)
+and hasattr(parent_resource, "assign_child_at_slot")
+and "slot" in other_calling_param
+):
 other_calling_param["slot"] = int(other_calling_param["slot"])
 parent_resource.assign_child_at_slot(plr_instance, **other_calling_param)
 else:

@@ -508,14 +528,16 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 rts_with_parent = ResourceTreeSet.from_plr_resources([parent_resource])
 if rts_with_parent.root_nodes[0].res_content.uuid_parent is None:
 rts_with_parent.root_nodes[0].res_content.parent_uuid = self.uuid
-request.command = json.dumps({
-"action": "add",
-"data": {
-"data": rts_with_parent.dump(),
-"mount_uuid": rts_with_parent.root_nodes[0].res_content.uuid_parent,
-"first_add": False,
-},
-})
+request.command = json.dumps(
+{
+"action": "add",
+"data": {
+"data": rts_with_parent.dump(),
+"mount_uuid": rts_with_parent.root_nodes[0].res_content.uuid_parent,
+"first_add": False,
+},
+}
+)
 tree_response: SerialCommand.Response = await client.call_async(request)
 uuid_maps = json.loads(tree_response.response)
 self.resource_tracker.loop_update_uuid(input_resources, uuid_maps)

@@ -625,7 +647,7 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 ) # type: ignore
 raw_nodes = json.loads(response.response)
 tree_set = ResourceTreeSet.from_raw_dict_list(raw_nodes)
-self.lab_logger().debug(f"获取资源结果: {len(tree_set.trees)} 个资源树")
+self.lab_logger().trace(f"获取资源结果: {len(tree_set.trees)} 个资源树 {tree_set.root_nodes}")
 return tree_set

 async def get_resource_with_dir(self, resource_id: str, with_children: bool = True) -> "ResourcePLR":

@@ -812,7 +834,9 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 }

 def _handle_update(
-plr_resources: List[Union[ResourcePLR, ResourceDictInstance]], tree_set: ResourceTreeSet, additional_add_params: Dict[str, Any]
+plr_resources: List[Union[ResourcePLR, ResourceDictInstance]],
+tree_set: ResourceTreeSet,
+additional_add_params: Dict[str, Any],
 ) -> Tuple[Dict[str, Any], List[ResourcePLR]]:
 """
 处理资源更新操作的内部函数

@@ -837,7 +861,10 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 original_parent_resource = original_instance.parent
 original_parent_resource_uuid = getattr(original_parent_resource, "unilabos_uuid", None)
 target_parent_resource_uuid = tree.root_node.res_content.uuid_parent
-not_same_parent = original_parent_resource_uuid != target_parent_resource_uuid and original_parent_resource is not None
+not_same_parent = (
+original_parent_resource_uuid != target_parent_resource_uuid
+and original_parent_resource is not None
+)
 old_name = original_instance.name
 new_name = plr_resource.name
 parent_appended = False

@@ -873,8 +900,16 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 else:
 # 判断是否变更了resource_site,重新登记
 target_site = original_instance.unilabos_extra.get("update_resource_site")
-sites = original_instance.parent.sites if original_instance.parent is not None and hasattr(original_instance.parent, "sites") else None
-site_names = list(original_instance.parent._ordering.keys()) if original_instance.parent is not None and hasattr(original_instance.parent, "sites") else []
+sites = (
+original_instance.parent.sites
+if original_instance.parent is not None and hasattr(original_instance.parent, "sites")
+else None
+)
+site_names = (
+list(original_instance.parent._ordering.keys())
+if original_instance.parent is not None and hasattr(original_instance.parent, "sites")
+else []
+)
 if target_site is not None and sites is not None and site_names is not None:
 site_index = sites.index(original_instance)
 site_name = site_names[site_index]

@@ -885,6 +920,9 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 parent_appended = True

 # 加载状态
+original_instance.location = plr_resource.location
+original_instance.rotation = plr_resource.rotation
+original_instance.barcode = plr_resource.barcode
 original_instance.load_all_state(states)
 child_count = len(original_instance.get_all_children())
 self.lab_logger().info(

@@ -908,9 +946,7 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 action = i.get("action") # remove, add, update
 resources_uuid: List[str] = i.get("data") # 资源数据
 additional_add_params = i.get("additional_add_params", {}) # 额外参数
-self.lab_logger().trace(
-f"[资源同步] 处理 {action}, " f"resources count: {len(resources_uuid)}"
-)
+self.lab_logger().trace(f"[资源同步] 处理 {action}, " f"resources count: {len(resources_uuid)}")
 tree_set = None
 if action in ["add", "update"]:
 tree_set = await self.get_resource(

@@ -937,9 +973,13 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 tree.root_node.res_content.parent_uuid = self.uuid
 r = SerialCommand.Request()
 r.command = json.dumps(
-{"data": {"data": new_tree_set.dump()}, "action": "update"}) # 和Update Resource一致
+{"data": {"data": new_tree_set.dump()}, "action": "update"}
+) # 和Update Resource一致
 response: SerialCommand_Response = await self._resource_clients[
-"c2s_update_resource_tree"].call_async(r) # type: ignore
+"c2s_update_resource_tree"
+].call_async(
+r
+) # type: ignore
 self.lab_logger().info(f"确认资源云端 Add 结果: {response.response}")
 results.append(result)
 elif action == "update":

@@ -959,9 +999,13 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 tree.root_node.res_content.parent_uuid = self.uuid
 r = SerialCommand.Request()
 r.command = json.dumps(
-{"data": {"data": new_tree_set.dump()}, "action": "update"}) # 和Update Resource一致
+{"data": {"data": new_tree_set.dump()}, "action": "update"}
+) # 和Update Resource一致
 response: SerialCommand_Response = await self._resource_clients[
-"c2s_update_resource_tree"].call_async(r) # type: ignore
+"c2s_update_resource_tree"
+].call_async(
+r
+) # type: ignore
 self.lab_logger().info(f"确认资源云端 Update 结果: {response.response}")
 results.append(result)
 elif action == "remove":

@@ -1320,26 +1364,41 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 resource_inputs = action_kwargs[k] if is_sequence else [action_kwargs[k]]

 # 批量查询资源
-queried_resources = []
-for resource_data in resource_inputs:
+queried_resources: list = [None] * len(resource_inputs)
+uuid_indices: list[tuple[int, str, dict]] = []  # (index, uuid, resource_data)
+
+# 第一遍:处理没有uuid的资源,收集有uuid的资源信息
+for idx, resource_data in enumerate(resource_inputs):
 unilabos_uuid = resource_data.get("data", {}).get("unilabos_uuid")
 if unilabos_uuid is None:
 plr_resource = await self.get_resource_with_dir(
 resource_id=resource_data["id"], with_children=True
 )
+if "sample_id" in resource_data:
+plr_resource.unilabos_extra[EXTRA_SAMPLE_UUID] = resource_data["sample_id"]
+queried_resources[idx] = plr_resource
 else:
-resource_tree = await self.get_resource([unilabos_uuid])
-plr_resource = resource_tree.to_plr_resources()[0]
-if "sample_id" in resource_data:
-plr_resource.unilabos_extra["sample_uuid"] = resource_data["sample_id"]
-queried_resources.append(plr_resource)
+uuid_indices.append((idx, unilabos_uuid, resource_data))
+
+# 第二遍:批量查询有uuid的资源
+if uuid_indices:
+uuids = [item[1] for item in uuid_indices]
+resource_tree = await self.get_resource(uuids)
+plr_resources = resource_tree.to_plr_resources()
+for i, (idx, _, resource_data) in enumerate(uuid_indices):
+plr_resource = plr_resources[i]
+if "sample_id" in resource_data:
+plr_resource.unilabos_extra[EXTRA_SAMPLE_UUID] = resource_data["sample_id"]
+queried_resources[idx] = plr_resource

 self.lab_logger().debug(f"资源查询结果: 共 {len(queried_resources)} 个资源")

 # 通过资源跟踪器获取本地实例
 final_resources = queried_resources if is_sequence else queried_resources[0]
 if not is_sequence:
-plr = self.resource_tracker.figure_resource({"name": final_resources.name}, try_mode=False)
+plr = self.resource_tracker.figure_resource(
+{"name": final_resources.name}, try_mode=False
+)
 # 保留unilabos_extra
 if hasattr(final_resources, "unilabos_extra") and hasattr(plr, "unilabos_extra"):
 plr.unilabos_extra = getattr(final_resources, "unilabos_extra", {}).copy()
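
Note: the hunk above replaces the per-item lookup with a two-pass, index-preserving batch query: resources without a uuid are fetched individually, uuid-bearing ones are collected and resolved in a single get_resource call. A minimal sketch of that pattern, assuming only the get_resource, get_resource_with_dir and EXTRA_SAMPLE_UUID names shown in the diff (the helper itself is illustrative, not part of the change):

async def resolve_resources(node, resource_inputs):
    # result slots keep the caller's ordering even though lookups are reordered
    resolved = [None] * len(resource_inputs)
    pending = []  # (index, uuid, raw input) for items that carry a uuid

    for idx, item in enumerate(resource_inputs):
        uuid = item.get("data", {}).get("unilabos_uuid")
        if uuid is None:
            resolved[idx] = await node.get_resource_with_dir(resource_id=item["id"], with_children=True)
        else:
            pending.append((idx, uuid, item))

    if pending:
        # one batched query instead of one round trip per uuid
        tree = await node.get_resource([u for _, u, _ in pending])
        for plr, (idx, _, item) in zip(tree.to_plr_resources(), pending):
            resolved[idx] = plr
    return resolved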
@@ -1378,8 +1437,12 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 execution_success = True
 except Exception as _:
 execution_error = traceback.format_exc()
-error(f"异步任务 {ACTION.__name__} 报错了\n{traceback.format_exc()}\n原始输入:{str(action_kwargs)[:1000]}")
-trace(f"异步任务 {ACTION.__name__} 报错了\n{traceback.format_exc()}\n原始输入:{action_kwargs}")
+error(
+f"异步任务 {ACTION.__name__} 报错了\n{traceback.format_exc()}\n原始输入:{str(action_kwargs)[:1000]}"
+)
+trace(
+f"异步任务 {ACTION.__name__} 报错了\n{traceback.format_exc()}\n原始输入:{action_kwargs}"
+)

 future = ROS2DeviceNode.run_async_func(ACTION, trace_error=False, **action_kwargs)
 future.add_done_callback(_handle_future_exception)

@@ -1399,9 +1462,11 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 except Exception as _:
 execution_error = traceback.format_exc()
 error(
-f"同步任务 {ACTION.__name__} 报错了\n{traceback.format_exc()}\n原始输入:{str(action_kwargs)[:1000]}")
+f"同步任务 {ACTION.__name__} 报错了\n{traceback.format_exc()}\n原始输入:{str(action_kwargs)[:1000]}"
+)
 trace(
-f"同步任务 {ACTION.__name__} 报错了\n{traceback.format_exc()}\n原始输入:{action_kwargs}")
+f"同步任务 {ACTION.__name__} 报错了\n{traceback.format_exc()}\n原始输入:{action_kwargs}"
+)

 future.add_done_callback(_handle_future_exception)

@@ -1468,11 +1533,18 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 if isinstance(rs, list):
 for r in rs:
 res = self.resource_tracker.parent_resource(r) # 获取 resource 对象
+if res is None:
+res = rs
+if id(res) not in seen:
+seen.add(id(res))
+unique_resources.append(res)
 else:
 res = self.resource_tracker.parent_resource(rs)
+if res is None:
+res = rs
 if id(res) not in seen:
 seen.add(id(res))
 unique_resources.append(res)

 # 使用新的资源树接口
 if unique_resources:

@@ -1524,20 +1596,39 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 try:
 function_name = target["function_name"]
 function_args = target["function_args"]
+# 获取 unilabos 系统参数
+unilabos_param: Dict[str, Any] = target[JSON_UNILABOS_PARAM]
+
 assert isinstance(function_args, dict), "执行动作时JSON必须为dict类型\n原JSON: {string}"
 function = getattr(self.driver_instance, function_name)
 assert callable(
 function
 ), f"执行动作时JSON中的function_name对应的函数不可调用: {function_name}\n原JSON: {string}"

-# 处理 ResourceSlot 类型参数
-args_list = default_manager._analyze_method_signature(function)["args"]
+# 处理参数(包含 unilabos 系统参数如 sample_uuids)
+args_list = default_manager._analyze_method_signature(function, skip_unilabos_params=False)["args"]
 for arg in args_list:
 arg_name = arg["name"]
 arg_type = arg["type"]

 # 跳过不在 function_args 中的参数
 if arg_name not in function_args:
+# 处理 sample_uuids 参数注入
+if arg_name == PARAM_SAMPLE_UUIDS:
+raw_sample_uuids = unilabos_param.get(PARAM_SAMPLE_UUIDS, {})
+# 将 material uuid 转换为 resource 实例
+# key: sample_uuid, value: material_uuid -> resource 实例
+resolved_sample_uuids: Dict[str, Any] = {}
+for sample_uuid, material_uuid in raw_sample_uuids.items():
+if material_uuid and self.resource_tracker:
+resource = self.resource_tracker.uuid_to_resources.get(material_uuid)
+resolved_sample_uuids[sample_uuid] = resource if resource else material_uuid
+else:
+resolved_sample_uuids[sample_uuid] = material_uuid
+function_args[PARAM_SAMPLE_UUIDS] = resolved_sample_uuids
+self.lab_logger().debug(
+f"[JsonCommand] 注入 {PARAM_SAMPLE_UUIDS}: {resolved_sample_uuids}"
+)
 continue

 # 处理单个 ResourceSlot

@@ -1567,6 +1658,7 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 )
 raise JsonCommandInitError(f"ResourceSlot列表参数转换失败: {arg_name}")

+# todo: 默认反报送
 return function(**function_args)
 except KeyError as ex:
 raise JsonCommandInitError(
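
Note: the injection added above resolves each sample uuid to a local resource instance through the tracker before the driver function is called, falling back to the raw material uuid when nothing is registered. A hedged sketch of just that resolution step; uuid_to_resources is the tracker mapping used in the diff, the standalone function is illustrative only:

def resolve_sample_uuids(raw_sample_uuids, resource_tracker):
    # key: sample_uuid, value: resolved resource instance or the original material uuid
    resolved = {}
    for sample_uuid, material_uuid in raw_sample_uuids.items():
        resource = resource_tracker.uuid_to_resources.get(material_uuid) if material_uuid else None
        resolved[sample_uuid] = resource if resource else material_uuid  # keep the uuid if unresolved
    return resolved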
@@ -1586,21 +1678,23 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 raise ValueError("至少需要提供一个 UUID")

 uuids_list = list(uuids)
-future = self._resource_clients["c2s_update_resource_tree"].call_async(SerialCommand.Request(
-command=json.dumps(
-{
-"data": {"data": uuids_list, "with_children": True},
-"action": "get",
-}
-)
-))
+future = self._resource_clients["c2s_update_resource_tree"].call_async(
+SerialCommand.Request(
+command=json.dumps(
+{
+"data": {"data": uuids_list, "with_children": True},
+"action": "get",
+}
+)
+)
+)

 # 等待结果(使用while循环,每次sleep 0.05秒,最多等待30秒)
 timeout = 30.0
 elapsed = 0.0
 while not future.done() and elapsed < timeout:
-time.sleep(0.05)
-elapsed += 0.05
+time.sleep(0.02)
+elapsed += 0.02

 if not future.done():
 raise Exception(f"资源查询超时: {uuids_list}")
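
Note: the query above waits on the ROS service future with a poll loop; the hunk only tightens the sleep step from 0.05 s to 0.02 s while keeping the 30 s budget. A minimal sketch of the same poll-with-timeout pattern, with the helper name and TimeoutError choice being illustrative assumptions:

import time

def wait_for(future, timeout=30.0, step=0.02):
    # poll the future until it completes or the time budget is exhausted
    elapsed = 0.0
    while not future.done() and elapsed < timeout:
        time.sleep(step)
        elapsed += step
    if not future.done():
        raise TimeoutError(f"query did not complete within {timeout} s")
    return future.result()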
@@ -1651,6 +1745,9 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 try:
 function_name = target["function_name"]
 function_args = target["function_args"]
+# 获取 unilabos 系统参数
+unilabos_param: Dict[str, Any] = target.get(JSON_UNILABOS_PARAM, {})
+
 assert isinstance(function_args, dict), "执行动作时JSON必须为dict类型\n原JSON: {string}"
 function = getattr(self.driver_instance, function_name)
 assert callable(

@@ -1660,14 +1757,30 @@ class BaseROS2DeviceNode(Node, Generic[T]):
 function
 ), f"执行动作时JSON中的function并非异步: {function_name}\n原JSON: {string}"

-# 处理 ResourceSlot 类型参数
-args_list = default_manager._analyze_method_signature(function)["args"]
+# 处理参数(包含 unilabos 系统参数如 sample_uuids)
+args_list = default_manager._analyze_method_signature(function, skip_unilabos_params=False)["args"]
 for arg in args_list:
 arg_name = arg["name"]
 arg_type = arg["type"]

 # 跳过不在 function_args 中的参数
 if arg_name not in function_args:
+# 处理 sample_uuids 参数注入
+if arg_name == PARAM_SAMPLE_UUIDS:
+raw_sample_uuids = unilabos_param.get(PARAM_SAMPLE_UUIDS, {})
+# 将 material uuid 转换为 resource 实例
+# key: sample_uuid, value: material_uuid -> resource 实例
+resolved_sample_uuids: Dict[str, Any] = {}
+for sample_uuid, material_uuid in raw_sample_uuids.items():
+if material_uuid and self.resource_tracker:
+resource = self.resource_tracker.uuid_to_resources.get(material_uuid)
+resolved_sample_uuids[sample_uuid] = resource if resource else material_uuid
+else:
+resolved_sample_uuids[sample_uuid] = material_uuid
+function_args[PARAM_SAMPLE_UUIDS] = resolved_sample_uuids
+self.lab_logger().debug(
+f"[JsonCommandAsync] 注入 {PARAM_SAMPLE_UUIDS}: {resolved_sample_uuids}"
+)
 continue

 # 处理单个 ResourceSlot

@@ -1757,6 +1870,15 @@ class ROS2DeviceNode:
 它不继承设备类,而是通过代理模式访问设备类的属性和方法。
 """

+# 类变量,用于循环管理
+_asyncio_loop = None
+_asyncio_loop_running = False
+_asyncio_loop_thread = None
+
+@classmethod
+def get_asyncio_loop(cls):
+return cls._asyncio_loop
+
 @staticmethod
 async def safe_task_wrapper(trace_callback, func, **kwargs):
 try:

@@ -1833,6 +1955,11 @@ class ROS2DeviceNode:
 print_publish: 是否打印发布信息
 driver_is_ros:
 """
+# 在初始化时检查循环状态
+if ROS2DeviceNode._asyncio_loop_running and ROS2DeviceNode._asyncio_loop_thread is not None:
+pass
+elif ROS2DeviceNode._asyncio_loop_thread is None:
+self._start_loop()

 # 保存设备类是否支持异步上下文
 self._has_async_context = hasattr(driver_class, "__aenter__") and hasattr(driver_class, "__aexit__")

@@ -1924,6 +2051,19 @@ class ROS2DeviceNode:
 except Exception as e:
 self._ros_node.lab_logger().error(f"设备后初始化失败: {e}")

+def _start_loop(self):
+def run_event_loop():
+loop = asyncio.new_event_loop()
+ROS2DeviceNode._asyncio_loop = loop
+asyncio.set_event_loop(loop)
+loop.run_forever()
+
+ROS2DeviceNode._asyncio_loop_thread = threading.Thread(
+target=run_event_loop, daemon=True, name="ROS2DeviceNode"
+)
+ROS2DeviceNode._asyncio_loop_thread.start()
+logger.info(f"循环线程已启动")
+

 class DeviceInfoType(TypedDict):
 id: str
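
Note: the _start_loop / get_asyncio_loop additions above give ROS2DeviceNode one shared asyncio event loop running in a daemon thread, so async driver work can be scheduled without creating a loop per call. A minimal sketch of that pattern, assuming nothing beyond the standard library (the run_async helper is illustrative, not the project's API):

import asyncio
import threading

# one background loop owned by a daemon thread
_loop = asyncio.new_event_loop()
threading.Thread(target=_loop.run_forever, daemon=True, name="ROS2DeviceNode").start()

def run_async(coro):
    # hand a coroutine to the background loop from any thread;
    # returns a concurrent.futures.Future that can be polled or awaited on
    return asyncio.run_coroutine_threadsafe(coro, _loop)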
@@ -1,16 +1,17 @@
 import collections
-from dataclasses import dataclass, field
 import json
 import threading
 import time
 import traceback
 import uuid
-from typing import TYPE_CHECKING, Optional, Dict, Any, List, ClassVar, Set, TypedDict, Union
+from dataclasses import dataclass, field
+from typing import TYPE_CHECKING, Optional, Dict, Any, List, ClassVar, Set, Union

 from action_msgs.msg import GoalStatus
 from geometry_msgs.msg import Point
 from rclpy.action import ActionClient, get_action_server_names_and_types_by_node
 from rclpy.service import Service
+from typing_extensions import TypedDict
 from unilabos_msgs.msg import Resource # type: ignore
 from unilabos_msgs.srv import (
 ResourceAdd,

@@ -22,10 +23,20 @@ from unilabos_msgs.srv import (
 from unilabos_msgs.srv._serial_command import SerialCommand_Request, SerialCommand_Response
 from unique_identifier_msgs.msg import UUID

+from unilabos.registry.placeholder_type import ResourceSlot, DeviceSlot
 from unilabos.registry.registry import lab_registry
 from unilabos.resources.container import RegularContainer
 from unilabos.resources.graphio import initialize_resource
 from unilabos.resources.registry import add_schema
+from unilabos.resources.resource_tracker import (
+ResourceDict,
+ResourceDictInstance,
+ResourceTreeSet,
+ResourceTreeInstance,
+RETURN_UNILABOS_SAMPLES,
+JSON_UNILABOS_PARAM,
+PARAM_SAMPLE_UUIDS,
+)
 from unilabos.ros.initialize_device import initialize_device_from_dict
 from unilabos.ros.msgs.message_converter import (
 get_msg_type,

@@ -36,17 +47,10 @@ from unilabos.ros.msgs.message_converter import (
 )
 from unilabos.ros.nodes.base_device_node import BaseROS2DeviceNode, ROS2DeviceNode, DeviceNodeResourceTracker
 from unilabos.ros.nodes.presets.controller_node import ControllerNode
-from unilabos.resources.resource_tracker import (
-ResourceDict,
-ResourceDictInstance,
-ResourceTreeSet,
-ResourceTreeInstance,
-)
 from unilabos.utils import logger
 from unilabos.utils.exception import DeviceClassInvalid
 from unilabos.utils.log import warning
 from unilabos.utils.type_check import serialize_result_info
-from unilabos.registry.placeholder_type import ResourceSlot, DeviceSlot

 if TYPE_CHECKING:
 from unilabos.app.ws_client import QueueItem

@@ -62,6 +66,18 @@ class TestResourceReturn(TypedDict):
 devices: List[DeviceSlot]


+class TestLatencyReturn(TypedDict):
+"""test_latency方法的返回值类型"""
+
+avg_rtt_ms: float
+avg_time_diff_ms: float
+max_time_error_ms: float
+task_delay_ms: float
+raw_delay_ms: float
+test_count: int
+status: str
+
+
 class HostNode(BaseROS2DeviceNode):
 """
 主机节点类,负责管理设备、资源和控制器
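
Note: TestLatencyReturn above replaces the previously untyped dict returned by test_latency with a TypedDict, so callers and static checkers see the exact fields and their types. A short usage sketch; the field names match the diff, while the report helper is hypothetical:

from typing_extensions import TypedDict

class TestLatencyReturn(TypedDict):
    avg_rtt_ms: float
    avg_time_diff_ms: float
    max_time_error_ms: float
    task_delay_ms: float
    raw_delay_ms: float
    test_count: int
    status: str

def report(result: TestLatencyReturn) -> str:
    # type checkers can now flag a typo such as result["avg_rtt"] at analysis time
    if result["status"] != "success":
        return "latency test failed: all pings timed out"
    return f"avg RTT {result['avg_rtt_ms']:.1f} ms over {result['test_count']} pings"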
@@ -735,13 +751,14 @@ class HostNode(BaseROS2DeviceNode):
 if bCreate:
 self.lab_logger().trace(f"Status created: {device_id}.{property_name} = {msg.data}")
 else:
-self.lab_logger().debug(f"Status updated: {device_id}.{property_name} = {msg.data}")
+self.lab_logger().trace(f"Status updated: {device_id}.{property_name} = {msg.data}")

 def send_goal(
 self,
 item: "QueueItem",
 action_type: str,
 action_kwargs: Dict[str, Any],
+sample_material: Dict[str, str],
 server_info: Optional[Dict[str, Any]] = None,
 ) -> None:
 """

@@ -759,14 +776,14 @@ class HostNode(BaseROS2DeviceNode):
 if action_name.startswith("auto-"):
 action_name = action_name[5:]
 action_id = f"/devices/{device_id}/_execute_driver_command"
-action_kwargs = {
-"string": json.dumps(
-{
-"function_name": action_name,
-"function_args": action_kwargs,
-}
-)
+json_command: Dict[str, Any] = {
+"function_name": action_name,
+"function_args": action_kwargs,
+JSON_UNILABOS_PARAM: {
+PARAM_SAMPLE_UUIDS: sample_material,
+},
 }
+action_kwargs = {"string": json.dumps(json_command)}
 if action_type.startswith("UniLabJsonCommandAsync"):
 action_id = f"/devices/{device_id}/_execute_driver_command_async"
 else:
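
Note: send_goal above now wraps driver calls in a single JSON envelope that carries the regular arguments plus the sample-to-material mapping under the unilabos system-parameter key, instead of rewriting sample_uuid keys inside the kwargs afterwards. A hedged sketch of that envelope; the literal key strings below stand in for JSON_UNILABOS_PARAM and PARAM_SAMPLE_UUIDS, whose actual values are defined elsewhere in the codebase:

import json

def build_goal_string(action_name, action_kwargs, sample_material):
    # everything the device node needs to dispatch the driver call travels in one string field
    json_command = {
        "function_name": action_name,
        "function_args": action_kwargs,
        "unilabos_param": {"sample_uuids": sample_material},  # assumed key names
    }
    return {"string": json.dumps(json_command)}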
@@ -777,24 +794,10 @@ class HostNode(BaseROS2DeviceNode):
 raise ValueError(f"ActionClient {action_id} not found.")

 action_client: ActionClient = self._action_clients[action_id]

-# 遍历action_kwargs下的所有子dict,将"sample_uuid"的值赋给"sample_id"
-def assign_sample_id(obj):
-if isinstance(obj, dict):
-if "sample_uuid" in obj:
-obj["sample_id"] = obj["sample_uuid"]
-obj.pop("sample_uuid")
-for k, v in obj.items():
-if k != "unilabos_extra":
-assign_sample_id(v)
-elif isinstance(obj, list):
-for item in obj:
-assign_sample_id(item)
-
-assign_sample_id(action_kwargs)
 goal_msg = convert_to_ros_msg(action_client._action_type.Goal(), action_kwargs)

-self.lab_logger().info(f"[Host Node] Sending goal for {action_id}: {str(goal_msg)[:1000]}")
+# self.lab_logger().trace(f"[Host Node] Sending goal for {action_id}: {str(goal_msg)[:1000]}")
+self.lab_logger().trace(f"[Host Node] Sending goal for {action_id}: {action_kwargs}")
 self.lab_logger().trace(f"[Host Node] Sending goal for {action_id}: {goal_msg}")
 action_client.wait_for_server()
 goal_uuid_obj = UUID(uuid=list(u.bytes))

@@ -853,9 +856,14 @@ class HostNode(BaseROS2DeviceNode):
 # 适配后端的一些额外处理
 return_value = return_info.get("return_value")
 if isinstance(return_value, dict):
-unilabos_samples = return_info.get("unilabos_samples")
-if isinstance(unilabos_samples, list):
-return_info["unilabos_samples"] = unilabos_samples
+unilabos_samples = return_value.pop(RETURN_UNILABOS_SAMPLES, None)
+if isinstance(unilabos_samples, list) and unilabos_samples:
+self.lab_logger().info(
+f"[Host Node] Job {job_id[:8]} returned {len(unilabos_samples)} sample(s): "
+f"{[s.get('name', s.get('id', 'unknown')) if isinstance(s, dict) else str(s)[:20] for s in unilabos_samples[:5]]}"
+f"{'...' if len(unilabos_samples) > 5 else ''}"
+)
+return_info["samples"] = unilabos_samples
 suc = return_info.get("suc", False)
 if not suc:
 status = "failed"

@@ -881,7 +889,7 @@ class HostNode(BaseROS2DeviceNode):
 # 清理 _goals 中的记录
 if job_id in self._goals:
 del self._goals[job_id]
-self.lab_logger().debug(f"[Host Node] Removed goal {job_id[:8]} from _goals")
+self.lab_logger().trace(f"[Host Node] Removed goal {job_id[:8]} from _goals")

 # 存储结果供 HTTP API 查询
 try:

@@ -1161,7 +1169,7 @@ class HostNode(BaseROS2DeviceNode):
 """
 更新节点信息回调
 """
-# self.lab_logger().info(f"[Host Node] Node info update request received: {request}")
+self.lab_logger().trace(f"[Host Node] Node info update request received: {request}")
 try:
 from unilabos.app.communication import get_communication_client
 from unilabos.app.web.client import HTTPClient, http_client

@@ -1326,10 +1334,20 @@ class HostNode(BaseROS2DeviceNode):
 self.lab_logger().debug(f"[Host Node-Resource] List parameters: {request}")
 return response

-def test_latency(self):
+def test_latency(self) -> TestLatencyReturn:
 """
 测试网络延迟的action实现
 通过5次ping-pong机制校对时间误差并计算实际延迟
+
+Returns:
+TestLatencyReturn: 包含延迟测试结果的字典,包括:
+- avg_rtt_ms: 平均往返时间(毫秒)
+- avg_time_diff_ms: 平均时间差(毫秒)
+- max_time_error_ms: 最大时间误差(毫秒)
+- task_delay_ms: 实际任务延迟(毫秒),-1表示无法计算
+- raw_delay_ms: 原始时间差(毫秒),-1表示无法计算
+- test_count: 有效测试次数
+- status: 测试状态,"success"表示成功,"all_timeout"表示全部超时
 """
 import uuid as uuid_module

@@ -1392,7 +1410,15 @@ class HostNode(BaseROS2DeviceNode):

 if not ping_results:
 self.lab_logger().error("❌ 所有ping-pong测试都失败了")
-return {"status": "all_timeout"}
+return {
+"avg_rtt_ms": -1.0,
+"avg_time_diff_ms": -1.0,
+"max_time_error_ms": -1.0,
+"task_delay_ms": -1.0,
+"raw_delay_ms": -1.0,
+"test_count": 0,
+"status": "all_timeout",
+}

 # 统计分析
 rtts = [r["rtt_ms"] for r in ping_results]

@@ -1400,7 +1426,7 @@ class HostNode(BaseROS2DeviceNode):

 avg_rtt_ms = sum(rtts) / len(rtts)
 avg_time_diff_ms = sum(time_diffs) / len(time_diffs)
-max_time_diff_error_ms = max(abs(min(time_diffs)), abs(max(time_diffs)))
+max_time_diff_error_ms: float = max(abs(min(time_diffs)), abs(max(time_diffs)))

 self.lab_logger().info("-" * 50)
 self.lab_logger().info("[测试统计]")

@@ -1440,7 +1466,7 @@ class HostNode(BaseROS2DeviceNode):

 self.lab_logger().info("=" * 60)

-return {
+res: TestLatencyReturn = {
 "avg_rtt_ms": avg_rtt_ms,
 "avg_time_diff_ms": avg_time_diff_ms,
 "max_time_error_ms": max_time_diff_error_ms,

@@ -1451,9 +1477,14 @@ class HostNode(BaseROS2DeviceNode):
 "test_count": len(ping_results),
 "status": "success",
 }
+return res

 def test_resource(
-self, resource: ResourceSlot = None, resources: List[ResourceSlot] = None, device: DeviceSlot = None, devices: List[DeviceSlot] = None
+self,
+resource: ResourceSlot = None,
+resources: List[ResourceSlot] = None,
+device: DeviceSlot = None,
+devices: List[DeviceSlot] = None,
 ) -> TestResourceReturn:
 if resources is None:
 resources = []

@@ -1514,7 +1545,9 @@ class HostNode(BaseROS2DeviceNode):

 # 构建服务地址
 srv_address = f"/srv{namespace}/s2c_resource_tree"
-self.lab_logger().trace(f"[Host Node-Resource] Host -> {device_id} ResourceTree {action} operation started -------")
+self.lab_logger().trace(
+f"[Host Node-Resource] Host -> {device_id} ResourceTree {action} operation started -------"
+)

 # 创建服务客户端
 sclient = self.create_client(SerialCommand, srv_address)

@@ -1549,7 +1582,9 @@ class HostNode(BaseROS2DeviceNode):
 time.sleep(0.05)

 response = future.result()
-self.lab_logger().trace(f"[Host Node-Resource] Host -> {device_id} ResourceTree {action} operation completed -------")
+self.lab_logger().trace(
+f"[Host Node-Resource] Host -> {device_id} ResourceTree {action} operation completed -------"
+)
 return True

 except Exception as e:
@@ -6,8 +6,6 @@ from typing import List, Dict, Any, Optional, TYPE_CHECKING

 import rclpy
 from rosidl_runtime_py import message_to_ordereddict
-from unilabos_msgs.msg import Resource
-from unilabos_msgs.srv import ResourceUpdate

 from unilabos.messages import * # type: ignore # protocol names
 from rclpy.action import ActionServer, ActionClient

@@ -15,7 +13,6 @@ from rclpy.action.server import ServerGoalHandle
 from unilabos_msgs.srv._serial_command import SerialCommand_Request, SerialCommand_Response

 from unilabos.compile import action_protocol_generators
-from unilabos.resources.graphio import nested_dict_to_list
 from unilabos.ros.initialize_device import initialize_device_from_dict
 from unilabos.ros.msgs.message_converter import (
 get_action_type,

@@ -231,15 +228,15 @@ class ROS2WorkstationNode(BaseROS2DeviceNode):
 try:
 # 统一处理单个或多个资源
 resource_id = (
-protocol_kwargs[k]["id"] if v == "unilabos_msgs/Resource" else protocol_kwargs[k][0]["id"]
+protocol_kwargs[k]["id"]
+if v == "unilabos_msgs/Resource"
+else protocol_kwargs[k][0]["id"]
 )
 resource_uuid = protocol_kwargs[k].get("uuid", None)
 r = SerialCommand_Request()
 r.command = json.dumps({"id": resource_id, "uuid": resource_uuid, "with_children": True})
 # 发送请求并等待响应
-response: SerialCommand_Response = await self._resource_clients[
-"resource_get"
-].call_async(
+response: SerialCommand_Response = await self._resource_clients["resource_get"].call_async(
 r
 ) # type: ignore
 raw_data = json.loads(response.response)

@@ -307,12 +304,54 @@ class ROS2WorkstationNode(BaseROS2DeviceNode):

 # 向Host更新物料当前状态
 for k, v in goal.get_fields_and_field_types().items():
-if v in ["unilabos_msgs/Resource", "sequence<unilabos_msgs/Resource>"]:
-r = ResourceUpdate.Request()
-r.resources = [
-convert_to_ros_msg(Resource, rs) for rs in nested_dict_to_list(protocol_kwargs[k])
-]
-response = await self._resource_clients["resource_update"].call_async(r)
+if v not in ["unilabos_msgs/Resource", "sequence<unilabos_msgs/Resource>"]:
+continue
+self.lab_logger().info(f"更新资源状态: {k}")
+try:
+# 去重:使用 seen 集合获取唯一的资源对象
+seen = set()
+unique_resources = []
+
+# 获取资源数据,统一转换为列表
+resource_data = protocol_kwargs[k]
+is_sequence = v != "unilabos_msgs/Resource"
+if not is_sequence:
+resource_list = [resource_data] if isinstance(resource_data, dict) else resource_data
+else:
+# 处理序列类型,可能是嵌套列表
+resource_list = []
+if isinstance(resource_data, list):
+for item in resource_data:
+if isinstance(item, list):
+resource_list.extend(item)
+else:
+resource_list.append(item)
+else:
+resource_list = [resource_data]
+
+for res_data in resource_list:
+if not isinstance(res_data, dict):
+continue
+res_name = res_data.get("id") or res_data.get("name")
+if not res_name:
+continue
+
+# 使用 resource_tracker 获取本地 PLR 实例
+plr = self.resource_tracker.figure_resource({"name": res_name}, try_mode=False)
+# 获取父资源
+res = self.resource_tracker.parent_resource(plr)
+if res is None:
+res = plr
+if id(res) not in seen:
+seen.add(id(res))
+unique_resources.append(res)
+
+# 使用新的资源树接口更新
+if unique_resources:
+await self.update_resource(unique_resources)
+except Exception as e:
+self.lab_logger().error(f"资源更新失败: {e}")
+self.lab_logger().error(traceback.format_exc())

 # 设置成功状态和返回值
 execution_success = True
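
Note: the rewritten update path above resolves each reported resource to its local PLR instance, walks up to its parent, and de-duplicates by object identity before calling update_resource, so a plate with many touched wells is uploaded only once. A minimal sketch of that dedup step (the helper is illustrative, not part of the change):

def unique_parents(resource_tracker, plr_resources):
    seen, unique = set(), []
    for plr in plr_resources:
        # prefer the parent container; fall back to the resource itself
        parent = resource_tracker.parent_resource(plr) or plr
        if id(parent) not in seen:  # identity, not equality, so shared parents collapse
            seen.add(id(parent))
            unique.append(parent)
    return unique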
@@ -52,7 +52,8 @@ class DeviceClassCreator(Generic[T]):
 if self.device_instance is not None:
 for c in self.children:
 if c.res_content.type != "device":
-self.resource_tracker.add_resource(c.get_plr_nested_dict())
+res = ResourceTreeSet([ResourceTreeInstance(c)]).to_plr_resources()[0]
+self.resource_tracker.add_resource(res)

 def create_instance(self, data: Dict[str, Any]) -> T:
 """

@@ -119,7 +120,7 @@ class PyLabRobotCreator(DeviceClassCreator[T]):
 # return resource, source_type

 def _process_resource_references(
-self, data: Any, to_dict=False, states=None, prefix_path="", name_to_uuid=None
+self, data: Any, processed_child_names: Optional[Dict[str, Any]], to_dict=False, states=None, prefix_path="", name_to_uuid=None
 ) -> Any:
 """
 递归处理资源引用,替换_resource_child_name对应的资源

@@ -164,6 +165,7 @@ class PyLabRobotCreator(DeviceClassCreator[T]):
 states[prefix_path] = resource_instance.serialize_all_state()
 return serialized
 else:
+processed_child_names[child_name] = resource_instance
 self.resource_tracker.add_resource(resource_instance)
 # 立即设置UUID,state已经在resource_ulab_to_plr中处理过了
 if name_to_uuid:

@@ -182,12 +184,12 @@ class PyLabRobotCreator(DeviceClassCreator[T]):
 result = {}
 for key, value in data.items():
 new_prefix = f"{prefix_path}.{key}" if prefix_path else key
-result[key] = self._process_resource_references(value, to_dict, states, new_prefix, name_to_uuid)
+result[key] = self._process_resource_references(value, processed_child_names, to_dict, states, new_prefix, name_to_uuid)
 return result

 elif isinstance(data, list):
 return [
-self._process_resource_references(item, to_dict, states, f"{prefix_path}[{i}]", name_to_uuid)
+self._process_resource_references(item, processed_child_names, to_dict, states, f"{prefix_path}[{i}]", name_to_uuid)
 for i, item in enumerate(data)
 ]

@@ -234,7 +236,7 @@ class PyLabRobotCreator(DeviceClassCreator[T]):
 # 首先处理资源引用
 states = {}
 processed_data = self._process_resource_references(
-data, to_dict=True, states=states, name_to_uuid=name_to_uuid
+data, {}, to_dict=True, states=states, name_to_uuid=name_to_uuid
 )

 try:

@@ -270,7 +272,12 @@ class PyLabRobotCreator(DeviceClassCreator[T]):
 arg_value = spec_args[param_name].annotation
 data[param_name]["_resource_type"] = self.device_cls.__module__ + ":" + arg_value
 logger.debug(f"自动补充 _resource_type: {data[param_name]['_resource_type']}")
-processed_data = self._process_resource_references(data, to_dict=False, name_to_uuid=name_to_uuid)
+processed_child_names = {}
+processed_data = self._process_resource_references(data, processed_child_names, to_dict=False, name_to_uuid=name_to_uuid)
+for child_name, resource_instance in processed_data.items():
+for ind, name in enumerate([child.res_content.name for child in self.children]):
+if name == child_name:
+self.children.pop(ind)
 self.device_instance = super(PyLabRobotCreator, self).create_instance(processed_data) # 补全变量后直接调用,调用的自身的attach_resource
 except Exception as e:
 logger.error(f"PyLabRobot创建实例失败: {e}")

@@ -342,9 +349,10 @@ class WorkstationNodeCreator(DeviceClassCreator[T]):
 try:
 # 创建实例,额外补充一个给protocol node的字段,后面考虑取消
 data["children"] = self.children
-for child in self.children:
-if child.res_content.type != "device":
-self.resource_tracker.add_resource(child.get_plr_nested_dict())
+# super(WorkstationNodeCreator, self).create_instance(data)的时候会attach
+# for child in self.children:
+# if child.res_content.type != "device":
+# self.resource_tracker.add_resource(child.get_plr_nested_dict())
 deck_dict = data.get("deck")
 if deck_dict:
 from pylabrobot.resources import Deck, Resource
@@ -339,13 +339,8 @@
 "z": 0
 },
 "config": {
-"max_volume": 500.0,
 "type": "RegularContainer",
-"category": "container",
-"max_temp": 200.0,
-"min_temp": -20.0,
-"has_stirrer": true,
-"has_heater": true
+"category": "container"
 },
 "data": {
 "liquids": [],

@@ -769,9 +764,7 @@
 "size_y": 250,
 "size_z": 0,
 "type": "RegularContainer",
-"category": "container",
-"reagent": "sodium_chloride",
-"physical_state": "solid"
+"category": "container"
 },
 "data": {
 "current_mass": 500.0,

@@ -792,14 +785,11 @@
 "z": 0
 },
 "config": {
-"volume": 500.0,
 "size_x": 600,
 "size_y": 250,
 "size_z": 0,
 "type": "RegularContainer",
-"category": "container",
-"reagent": "sodium_carbonate",
-"physical_state": "solid"
+"category": "container"
 },
 "data": {
 "current_mass": 500.0,

@@ -820,14 +810,11 @@
 "z": 0
 },
 "config": {
-"volume": 500.0,
 "size_x": 650,
 "size_y": 250,
 "size_z": 0,
 "type": "RegularContainer",
-"category": "container",
-"reagent": "magnesium_chloride",
-"physical_state": "solid"
+"category": "container"
 },
 "data": {
 "current_mass": 500.0,
 837  unilabos/test/experiments/prcxi_9320_no_res.json  (new file)
@@ -0,0 +1,837 @@
Resource graph for the PRCXI 9320 without pre-placed labware: a "nodes" list plus an empty "edges" list.

- "PRCXI" (type "device", class "liquid_handler.prcxi", parent ""): pose size 550 x 400 x 0; config { axis: "Left", deck: { "_resource_type": "unilabos.devices.liquid_handling.prcxi.prcxi:PRCXI9300Deck", "_resource_child_name": "PRCXI_Deck" }, host: "10.20.30.184", port: 9999, debug: false, setup: true, is_9320: true, timeout: 10, matrix_id: "5de524d0-3f95-406c-86dd-f83626ebc7cb", simulator: false, step_mode: false, channel_num: 2 }; data { reset_ok: true }; position (0, 700, 0).
- "PRCXI_Deck" (type "deck", parent "PRCXI"): config { type: "PRCXI9300Deck", size 550 x 400 x 17, rotation (0, 0, 0), category: "deck", barcode: null }; position (0, 0, 0).
- "T1" through "T16" (type "plate", parent "PRCXI_Deck"): sixteen PRCXI9300PlateAdapterSite entries sharing config { size 127.5 x 86 x 28 (T4 uses size_z 94), rotation (0, 0, 0), category: "plate", model: null, barcode: null } and one site each { label: "<id>", visible: true, position (0, 0, 0), size 128.0 x 86 x 0, content_type: ["plate", "tip_rack", "plates", "tip_racks", "tube_rack"] }. Deck positions (x, y):
    T1 (5, 301), T2 (142.5, 301), T3 (280, 301), T4 (417.5, 301),
    T5 (5, 205), T6 (142.5, 205), T7 (280, 205), T8 (417.5, 205),
    T9 (5, 109), T10 (142.5, 109), T11 (280, 109), T12 (417.5, 109),
    T13 (5, 13), T14 (142.5, 13), T15 (280, 13), T16 (417.5, 13).
- "trash" (type "trash", parent "T16"): config { type: "PRCXI9300Trash", size 127.5 x 86 x 10, rotation (0, 0, 0), category: "trash", model: null, barcode: null, max_volume: "Infinity", material_z_thickness: 0, compute_volume_from_height: null, compute_height_from_volume: null }; data { liquids: [], pending_liquids: [], liquid_history: [], Material: { uuid: "730067cf07ae43849ddf4034299030e9" } }; position (0, 0, 0).
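The sixteen Tn entries above differ only in their deck coordinates and T4's height, so such a layout can be generated instead of hand-written. A minimal sketch (illustrative only, not a unilabos utility) that reproduces the positions listed above, with the pitches read off the JSON:

# Sketch: generate the 16 plate-adapter-site nodes of the 9320 deck layout above.
# Column/row pitches are read off the JSON (x = 5 + 137.5*i, y = 301 - 96*j);
# this is an illustration, not a unilabos helper.
def make_deck_sites():
    nodes = []
    for j in range(4):          # rows, top (y=301) to bottom (y=13)
        for i in range(4):      # columns, left to right
            idx = j * 4 + i + 1
            nodes.append({
                "id": f"T{idx}",
                "name": f"T{idx}",
                "parent": "PRCXI_Deck",
                "type": "plate",
                "position": {"x": 5 + 137.5 * i, "y": 301 - 96 * j, "z": 0},
                "config": {
                    "type": "PRCXI9300PlateAdapterSite",
                    "size_x": 127.5, "size_y": 86,
                    "size_z": 94 if idx == 4 else 28,  # T4 is taller in the file above
                    "category": "plate",
                },
            })
    return nodes

print([(n["id"], n["position"]["x"], n["position"]["y"]) for n in make_deck_sites()[:4]])
# [('T1', 5.0, 301), ('T2', 142.5, 301), ('T3', 280.0, 301), ('T4', 417.5, 301)]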
 795  unilabos/test/experiments/prcxi_9320_slim.json  (new file)
@@ -0,0 +1,795 @@
Slim simulator variant of the same resource graph ("nodes" plus an empty "edges" list):

- "PRCXI" (type "device", class "liquid_handler.prcxi", parent ""): pose size 562 x 394 x 0; config { axis: "Left", deck: { "_resource_type": "unilabos.devices.liquid_handling.prcxi.prcxi:PRCXI9300Deck", "_resource_child_name": "PRCXI_Deck" }, host: "10.20.30.184", port: 9999, debug: true, setup: true, is_9320: true, timeout: 10, matrix_id: "5de524d0-3f95-406c-86dd-f83626ebc7cb", simulator: true, channel_num: 2 }; data { reset_ok: true }; position (0, 240, 0).
- "PRCXI_Deck" (type "deck", parent "PRCXI"): config { type: "PRCXI9300Deck", size 542 x 374 x 0, rotation (0, 0, 0), category: "deck", barcode: null }; position (10, 10, 0).
- "T1" through "T16" (type "plate", parent "PRCXI_Deck"): sixteen PRCXI9300Container entries sharing config { size 127 x 85.5 x 10, rotation (0, 0, 0), category: "plate", model: null, barcode: null, ordering: {} } and one site each { label: "<id>", visible: true, position (0, 0, 0), size 128.0 x 86 x 0, content_type: ["plate", "tip_rack", "plates", "tip_racks", "tube_rack"] }. Deck positions (x, y):
    T1 (0, 288), T2 (138, 288), T3 (276, 288), T4 (414, 288),
    T5 (0, 192), T6 (138, 192), T7 (276, 192), T8 (414, 192),
    T9 (0, 96), T10 (138, 96), T11 (276, 96), T12 (414, 96),
    T13 (0, 0), T14 (138, 0), T15 (276, 0), T16 (414, 0).
No trash node is defined in this variant.
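Because these layout files reference each other by id (children point at their parent via the "parent" field), a quick structural check is easy to script. A minimal sketch, illustrative only since unilabos has its own loaders, that verifies every node's parent id exists in the file:

# Sketch: sanity-check a resource-graph JSON like the two files above by
# confirming that every node's parent id refers to an existing node.
import json
from pathlib import Path

def check_parents(path: str) -> list:
    graph = json.loads(Path(path).read_text(encoding="utf-8"))
    ids = {node["id"] for node in graph["nodes"]}
    problems = []
    for node in graph["nodes"]:
        parent = node.get("parent", "")
        if parent and parent not in ids:
            problems.append(f'{node["id"]}: unknown parent "{parent}"')
    return problems

if __name__ == "__main__":
    for issue in check_parents("unilabos/test/experiments/prcxi_9320_slim.json"):
        print(issue)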
@@ -221,7 +221,7 @@
         "type": "PRCXI9300PlateAdapterSite",
         "size_x": 127.5,
         "size_y": 86,
-        "size_z": 97,
+        "size_z": 94,
         "rotation": {
           "x": 0,
           "y": 0,
@@ -16283,7 +16283,7 @@
         "size_y": 85.8,
         "size_z": 42.66,
         "barcode": null,
-        "category": null,
+        "category": "tube_rack",
         "ordering": {
           "A1": "PlateT6_0_0",
           "A2": "PlateT6_1_0",
@@ -24,6 +24,7 @@ class EnvironmentChecker:
         "msgcenterpy": "msgcenterpy",
         "opentrons_shared_data": "opentrons_shared_data",
         "typing_extensions": "typing_extensions",
+        "crcmod": "crcmod-plus",
     }

     # Special packages (packages that need special handling)
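The new entry covers a package whose import name ("crcmod") differs from its pip distribution name ("crcmod-plus"). A minimal sketch of how such an import-name to pip-name table can be used; the mapping below mirrors the entries visible in this hunk, and find_missing is a hypothetical helper for illustration, not the project's API:

# Sketch: use an import-name -> pip-name table (as in EnvironmentChecker)
# to report which pip packages still need to be installed.
import importlib.util

IMPORT_TO_PIP = {
    "msgcenterpy": "msgcenterpy",
    "opentrons_shared_data": "opentrons_shared_data",
    "typing_extensions": "typing_extensions",
    "crcmod": "crcmod-plus",  # imports as `crcmod`, installs as `crcmod-plus`
}

def find_missing(mapping):
    """Return the pip package names whose import name cannot be resolved."""
    return [pip_name for import_name, pip_name in mapping.items()
            if importlib.util.find_spec(import_name) is None]

if __name__ == "__main__":
    missing = find_missing(IMPORT_TO_PIP)
    if missing:
        print("pip install " + " ".join(missing))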
@@ -27,6 +27,7 @@ __all__ = [

 from ast import Constant

+from unilabos.resources.resource_tracker import PARAM_SAMPLE_UUIDS
 from unilabos.utils import logger


@@ -334,13 +335,18 @@ class ImportManager:
             result["action_methods"][method_name] = method_info
         return result

-    def _analyze_method_signature(self, method) -> Dict[str, Any]:
+    def _analyze_method_signature(self, method, skip_unilabos_params: bool = True) -> Dict[str, Any]:
         """
         Analyze a method signature and extract its concrete named parameters

         Note: this method skips *args and **kwargs and only extracts concrete named parameters,
         which keeps argument passing via **dict accurate

+        Args:
+            method: the method to analyze
+            skip_unilabos_params: whether to skip unilabos system parameters (such as sample_uuids);
+                True when completing the registry, False when executing a JsonCommand
+
         Example usage:
             method_info = self._analyze_method_signature(some_method)
             params = {"param1": "value1", "param2": "value2"}
@@ -361,6 +367,10 @@ class ImportManager:
             if param.kind == param.VAR_KEYWORD:  # **kwargs
                 continue

+            # Skip the sample_uuids parameter (injected automatically by the system; skipped when completing the registry)
+            if skip_unilabos_params and param_name == PARAM_SAMPLE_UUIDS:
+                continue
+
             is_required = param.default == inspect.Parameter.empty
             if is_required:
                 num_required += 1
@@ -549,6 +559,9 @@ class ImportManager:
         for i, arg in enumerate(node.args.args):
             if arg.arg == "self":
                 continue
+            # Skip the sample_uuids parameter (injected automatically by the system)
+            if arg.arg == PARAM_SAMPLE_UUIDS:
+                continue
             arg_info = {
                 "name": arg.arg,
                 "type": None,
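For context, a self-contained sketch of the kind of signature analysis this hunk modifies: enumerate concrete named parameters with inspect, skip *args/**kwargs, and optionally skip a system-injected parameter. This is an illustration, not the unilabos implementation; "sample_uuids" stands in for PARAM_SAMPLE_UUIDS.

# Sketch: extract concrete named parameters from a callable, skipping
# *args/**kwargs and, optionally, a system-injected parameter.
import inspect
from typing import Any, Dict

SAMPLE_UUIDS = "sample_uuids"  # stands in for PARAM_SAMPLE_UUIDS

def analyze_signature(method, skip_system_params: bool = True) -> Dict[str, Any]:
    params: Dict[str, Any] = {}
    for name, param in inspect.signature(method).parameters.items():
        if name == "self":
            continue
        if param.kind in (param.VAR_POSITIONAL, param.VAR_KEYWORD):  # *args / **kwargs
            continue
        if skip_system_params and name == SAMPLE_UUIDS:
            continue
        params[name] = {
            "required": param.default is inspect.Parameter.empty,
            "default": None if param.default is inspect.Parameter.empty else param.default,
        }
    return params

def transfer(volume: float, sample_uuids=None, speed: float = 1.0, **kwargs):
    ...

print(analyze_signature(transfer))
# {'volume': {'required': True, 'default': None}, 'speed': {'required': False, 'default': 1.0}}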
 18  unilabos/utils/requirements.txt  (new file)
@@ -0,0 +1,18 @@
+networkx
+typing_extensions
+websockets
+msgcenterpy>=0.1.5
+opentrons_shared_data
+pint
+fastapi
+jinja2
+requests
+uvicorn
+pyautogui
+opcua
+pyserial
+pandas
+crcmod-plus
+pymodbus
+matplotlib
+pylibftdi
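A small sketch of how a requirements file like this one can drive a best-effort environment setup; the path below is the one added in this diff, and installing via "python -m pip" through subprocess is a common pattern, not necessarily what unilabos itself does.

# Sketch: read the requirements file and hand the entries to pip, which skips
# anything already satisfied. Assumes plain "name" or "name>=version" lines.
import subprocess
import sys
from pathlib import Path

def ensure_requirements(path: str = "unilabos/utils/requirements.txt") -> None:
    lines = [ln.strip() for ln in Path(path).read_text(encoding="utf-8").splitlines()]
    reqs = [ln for ln in lines if ln and not ln.startswith("#")]
    subprocess.check_call([sys.executable, "-m", "pip", "install", *reqs])

if __name__ == "__main__":
    ensure_requirements()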
@@ -1,3 +1,104 @@
+"""
+Workflow conversion module - JSON to WorkflowGraph conversion flow
+
+==================== Input format (JSON) ====================
+
+{
+  "workflow": [
+    {"action": "transfer_liquid", "action_args": {"sources": "cell_lines", "targets": "Liquid_1", "asp_vol": 100.0, "dis_vol": 74.75, ...}},
+    ...
+  ],
+  "reagent": {
+    "cell_lines": {"slot": 4, "well": ["A1", "A3", "A5"], "labware": "DRUG + YOYO-MEDIA"},
+    "Liquid_1": {"slot": 1, "well": ["A4", "A7", "A10"], "labware": "rep 1"},
+    ...
+  }
+}
+
+==================== Conversion steps ====================
+
+Step 1: deduplicate by slot and create create_resource nodes (create the plates)
+--------------------------------------------------------------------------------
+- First create a Group node (type="Group", minimized=true) that contains all create_resource nodes
+- Walk all reagents, deduplicate by slot, and create one plate per unique slot
+- Every create_resource node has parent_uuid pointing at the Group node and minimized=true
+- Generated parameters:
+    res_id: plate_slot_{slot}
+    device_id: /PRCXI
+    class_name: PRCXI_BioER_96_wellplate
+    parent: /PRCXI/PRCXI_Deck/T{slot}
+    slot_on_deck: "{slot}"
+- Output port: labware (used to connect set_liquid_from_plate)
+- Control flow: create_resource nodes are chained through the ready port
+
+Example: slot=1, slot=4 -> creates 1 Group + 2 create_resource nodes
+
+Step 2: create one set_liquid_from_plate node per reagent (set the liquids)
+--------------------------------------------------------------------------------
+- First create a Group node (type="Group", minimized=true) that contains all set_liquid_from_plate nodes
+- Walk all reagents and create one set_liquid_from_plate node per reagent
+- Every set_liquid_from_plate node has parent_uuid pointing at the Group node and minimized=true
+- Generated parameters:
+    plate: [] (passed through the connection, from create_resource's labware)
+    well_names: ["A1", "A3", "A5"] (from the reagent's well array)
+    liquid_names: ["cell_lines", "cell_lines", "cell_lines"] (same length as the wells)
+    volumes: [1e5, 1e5, 1e5] (same length as the wells, default volume)
+- Input connection: create_resource (labware) -> set_liquid_from_plate (input_plate)
+- Output port: output_wells (used to connect transfer_liquid)
+- Control flow: set_liquid_from_plate nodes come after all create_resource nodes, chained through the ready port
+
+Step 3: parse the workflow and create transfer_liquid and other action nodes
+--------------------------------------------------------------------------------
+- Walk the workflow array and create one step node per action
+- Parameter renaming: asp_vol -> asp_vols, dis_vol -> dis_vols, asp_flow_rate -> asp_flow_rates, dis_flow_rate -> dis_flow_rates
+- Parameter expansion: expand single values into arrays based on the number of wells in targets
+  e.g. asp_vol=100.0, targets has 3 wells -> asp_vols=[100.0, 100.0, 100.0]
+- Connection handling: if sources/targets are already connected via set_liquid_from_plate, the parameter value becomes []
+- Input connection: set_liquid_from_plate (output_wells) -> transfer_liquid (sources_identifier / targets_identifier)
+- Output ports: sources_out, targets_out (used to connect the next transfer_liquid)
+
+==================== Connection graph ====================
+
+Control flow (chained through the ready port):
+- between create_resource nodes: no ready connection
+- between set_liquid_from_plate nodes: no ready connection
+- between create_resource and set_liquid_from_plate: no ready connection
+- between transfer_liquid nodes: chained through the ready port
+  transfer_liquid_1 -> transfer_liquid_2 -> transfer_liquid_3 -> ...
+
+Material flow:
+[create_resource] --labware--> [set_liquid_from_plate] --output_wells--> [transfer_liquid] --sources_out/targets_out--> [next transfer_liquid]
+  (slot=1)                       (cell_lines)  (input_plate)               (sources_identifier)                          (sources_identifier)
+  (slot=4)                       (Liquid_1)                                (targets_identifier)                          (targets_identifier)
+
+==================== Port mapping ====================
+
+create_resource:
+  outputs: labware
+
+set_liquid_from_plate:
+  inputs: input_plate
+  outputs: output_plate, output_wells
+
+transfer_liquid:
+  inputs: sources -> sources_identifier, targets -> targets_identifier
+  outputs: sources -> sources_out, targets -> targets_out
+
+==================== Device name configuration (device_name) ====================
+
+Every node carries a device_name field naming the device it executes on:
+- create_resource: device_name = "host_node" (fixed)
+- set_liquid_from_plate: device_name = "PRCXI" (configurable, see DEVICE_NAME_DEFAULT)
+- transfer_liquid and other actions: device_name = "PRCXI" (configurable, see DEVICE_NAME_DEFAULT)
+
+==================== Validation rules ====================
+
+- Check that sources/targets are defined in reagent
+- Check that the well counts of sources and targets match
+- Check that parameter array lengths match the well count
+- On problems, append a [WARN: ...] marker to the footer
+"""
 import re
 import uuid

@@ -8,6 +109,35 @@ from typing import Dict, List, Any, Tuple, Optional

 Json = Dict[str, Any]


+# ==================== Default configuration ====================
+
+# Device names
+DEVICE_NAME_HOST = "host_node"  # create_resource always runs on host_node
+DEVICE_NAME_DEFAULT = "PRCXI"  # default device name for transfer_liquid, set_liquid_from_plate and other actions
+
+# Node type
+NODE_TYPE_DEFAULT = "ILab"  # default type for all nodes
+
+# Default parameters for create_resource nodes
+CREATE_RESOURCE_DEFAULTS = {
+    "device_id": "/PRCXI",
+    "parent_template": "/PRCXI/PRCXI_Deck/T{slot}",  # {slot} is replaced with the actual slot value
+    "class_name": "PRCXI_BioER_96_wellplate",
+}
+
+# Default liquid volume (uL)
+DEFAULT_LIQUID_VOLUME = 1e5
+
+# Parameter rename mapping: singular -> plural (for transfer_liquid and similar actions)
+PARAM_RENAME_MAPPING = {
+    "asp_vol": "asp_vols",
+    "dis_vol": "dis_vols",
+    "asp_flow_rate": "asp_flow_rates",
+    "dis_flow_rate": "dis_flow_rates",
+}
+
+
 # ---------------- Graph ----------------

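A stand-alone sketch of the slot deduplication described in step 1 of the module docstring, using the same defaults as the constants added above; the reagent dict is the example payload from the docstring, and this is an illustration rather than the module's build_protocol_graph implementation.

# Sketch of step 1 only (slot deduplication), using the defaults defined above.
reagent = {
    "cell_lines": {"slot": 4, "well": ["A1", "A3", "A5"], "labware": "DRUG + YOYO-MEDIA"},
    "Liquid_1": {"slot": 1, "well": ["A4", "A7", "A10"], "labware": "rep 1"},
}

CREATE_RESOURCE_DEFAULTS = {
    "device_id": "/PRCXI",
    "parent_template": "/PRCXI/PRCXI_Deck/T{slot}",
    "class_name": "PRCXI_BioER_96_wellplate",
}

slots_info = {}
for name, item in reagent.items():
    slot = str(item.get("slot", ""))
    if slot and slot not in slots_info:
        slots_info[slot] = {
            "res_id": f"plate_slot_{slot}",
            "parent": CREATE_RESOURCE_DEFAULTS["parent_template"].format(slot=slot),
        }

print(slots_info)
# {'4': {'res_id': 'plate_slot_4', 'parent': '/PRCXI/PRCXI_Deck/T4'},
#  '1': {'res_id': 'plate_slot_1', 'parent': '/PRCXI/PRCXI_Deck/T1'}}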
@@ -228,7 +358,7 @@ def refactor_data(


 def build_protocol_graph(
-    labware_info: List[Dict[str, Any]],
+    labware_info: Dict[str, Dict[str, Any]],
     protocol_steps: List[Dict[str, Any]],
     workstation_name: str,
     action_resource_mapping: Optional[Dict[str, str]] = None,
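Before the full body of the hunk below, a small sketch of the rename-and-expand rule from step 3 of the docstring: singular volume/flow-rate keys become plural and are broadcast to the number of target wells. This is an illustration of the documented behaviour, not the module's actual code path.

# Sketch: rename singular keys to plural and expand scalars to the well count.
PARAM_RENAME_MAPPING = {
    "asp_vol": "asp_vols",
    "dis_vol": "dis_vols",
    "asp_flow_rate": "asp_flow_rates",
    "dis_flow_rate": "dis_flow_rates",
}
EXPAND_BY_WELLS_PARAMS = ["asp_vols", "dis_vols", "asp_flow_rates", "dis_flow_rates"]

def normalize_action_args(args: dict, wells_count: int) -> dict:
    params = dict(args)  # copy; do not mutate the caller's dict
    for old, new in PARAM_RENAME_MAPPING.items():
        if old in params:
            params[new] = params.pop(old)
    for key in EXPAND_BY_WELLS_PARAMS:
        value = params.get(key)
        if value is not None and not isinstance(value, list):
            params[key] = [value] * wells_count
    return params

print(normalize_action_args({"asp_vol": 100.0, "dis_vol": 74.75}, wells_count=3))
# {'asp_vols': [100.0, 100.0, 100.0], 'dis_vols': [74.75, 74.75, 74.75]}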
@@ -236,112 +366,260 @@ def build_protocol_graph(
|
|||||||
"""统一的协议图构建函数,根据设备类型自动选择构建逻辑
|
"""统一的协议图构建函数,根据设备类型自动选择构建逻辑
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
labware_info: labware 信息字典
|
labware_info: labware 信息字典,格式为 {name: {slot, well, labware, ...}, ...}
|
||||||
protocol_steps: 协议步骤列表
|
protocol_steps: 协议步骤列表
|
||||||
workstation_name: 工作站名称
|
workstation_name: 工作站名称
|
||||||
action_resource_mapping: action 到 resource_name 的映射字典,可选
|
action_resource_mapping: action 到 resource_name 的映射字典,可选
|
||||||
"""
|
"""
|
||||||
G = WorkflowGraph()
|
G = WorkflowGraph()
|
||||||
resource_last_writer = {}
|
resource_last_writer = {} # reagent_name -> "node_id:port"
|
||||||
|
slot_to_create_resource = {} # slot -> create_resource node_id
|
||||||
|
|
||||||
protocol_steps = refactor_data(protocol_steps, action_resource_mapping)
|
protocol_steps = refactor_data(protocol_steps, action_resource_mapping)
|
||||||
# 有机化学&移液站协议图构建
|
|
||||||
WORKSTATION_ID = workstation_name
|
|
||||||
|
|
||||||
# 为所有labware创建资源节点
|
# ==================== 第一步:按 slot 去重创建 create_resource 节点 ====================
|
||||||
res_index = 0
|
# 收集所有唯一的 slot
|
||||||
|
slots_info = {} # slot -> {labware, res_id}
|
||||||
for labware_id, item in labware_info.items():
|
for labware_id, item in labware_info.items():
|
||||||
# item_id = item.get("id") or item.get("name", f"item_{uuid.uuid4()}")
|
slot = str(item.get("slot", ""))
|
||||||
node_id = str(uuid.uuid4())
|
if slot and slot not in slots_info:
|
||||||
|
res_id = f"plate_slot_{slot}"
|
||||||
|
slots_info[slot] = {
|
||||||
|
"labware": item.get("labware", ""),
|
||||||
|
"res_id": res_id,
|
||||||
|
}
|
||||||
|
|
||||||
# 判断节点类型
|
# 创建 Group 节点,包含所有 create_resource 节点
|
||||||
if "Rack" in str(labware_id) or "Tip" in str(labware_id):
|
group_node_id = str(uuid.uuid4())
|
||||||
lab_node_type = "Labware"
|
G.add_node(
|
||||||
description = f"Prepare Labware: {labware_id}"
|
group_node_id,
|
||||||
liquid_type = []
|
name="Resources Group",
|
||||||
liquid_volume = []
|
type="Group",
|
||||||
elif item.get("type") == "hardware" or "reactor" in str(labware_id).lower():
|
parent_uuid="",
|
||||||
if "reactor" not in str(labware_id).lower():
|
lab_node_type="Device",
|
||||||
continue
|
template_name="",
|
||||||
lab_node_type = "Sample"
|
resource_name="",
|
||||||
description = f"Prepare Reactor: {labware_id}"
|
footer="",
|
||||||
liquid_type = []
|
minimized=True,
|
||||||
liquid_volume = []
|
param=None,
|
||||||
else:
|
)
|
||||||
lab_node_type = "Reagent"
|
|
||||||
description = f"Add Reagent to Flask: {labware_id}"
|
# 为每个唯一的 slot 创建 create_resource 节点
|
||||||
liquid_type = [labware_id]
|
res_index = 0
|
||||||
liquid_volume = [1e5]
|
for slot, info in slots_info.items():
|
||||||
|
node_id = str(uuid.uuid4())
|
||||||
|
res_id = info["res_id"]
|
||||||
|
|
||||||
res_index += 1
|
res_index += 1
|
||||||
G.add_node(
|
G.add_node(
|
||||||
node_id,
|
node_id,
|
||||||
template_name="create_resource",
|
template_name="create_resource",
|
||||||
resource_name="host_node",
|
resource_name="host_node",
|
||||||
name=f"Res {res_index}",
|
name=f"Plate {res_index}",
|
||||||
description=description,
|
description=f"Create plate on slot {slot}",
|
||||||
lab_node_type=lab_node_type,
|
lab_node_type="Labware",
|
||||||
footer="create_resource-host_node",
|
footer="create_resource-host_node",
|
||||||
|
device_name=DEVICE_NAME_HOST,
|
||||||
|
type=NODE_TYPE_DEFAULT,
|
||||||
|
parent_uuid=group_node_id, # 指向 Group 节点
|
||||||
|
minimized=True, # 折叠显示
|
||||||
param={
|
param={
|
||||||
"res_id": labware_id,
|
"res_id": res_id,
|
||||||
"device_id": WORKSTATION_ID,
|
"device_id": CREATE_RESOURCE_DEFAULTS["device_id"],
|
||||||
"class_name": "container",
|
"class_name": CREATE_RESOURCE_DEFAULTS["class_name"],
|
||||||
"parent": WORKSTATION_ID,
|
"parent": CREATE_RESOURCE_DEFAULTS["parent_template"].format(slot=slot),
|
||||||
"bind_locations": {"x": 0.0, "y": 0.0, "z": 0.0},
|
"bind_locations": {"x": 0.0, "y": 0.0, "z": 0.0},
|
||||||
"liquid_input_slot": [-1],
|
"slot_on_deck": slot,
|
||||||
"liquid_type": liquid_type,
|
|
||||||
"liquid_volume": liquid_volume,
|
|
||||||
"slot_on_deck": "",
|
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
resource_last_writer[labware_id] = f"{node_id}:labware"
|
slot_to_create_resource[slot] = node_id
|
||||||
|
|
||||||
+    # No ready connections are needed between create_resource nodes

+    # ==================== Step 2: create a set_liquid_from_plate node for each reagent ====================
+    # Create a Group node that contains all set_liquid_from_plate nodes
+    set_liquid_group_id = str(uuid.uuid4())
+    G.add_node(
+        set_liquid_group_id,
+        name="SetLiquid Group",
+        type="Group",
+        parent_uuid="",
+        lab_node_type="Device",
+        template_name="",
+        resource_name="",
+        footer="",
+        minimized=True,
+        param=None,
+    )

+    set_liquid_index = 0

+    for labware_id, item in labware_info.items():
+        # Skip Tip/Rack types
+        if "Rack" in str(labware_id) or "Tip" in str(labware_id):
+            continue
+        if item.get("type") == "hardware":
+            continue

+        slot = str(item.get("slot", ""))
+        wells = item.get("well", [])
+        if not wells or not slot:
+            continue

+        # res_id must not contain spaces
+        res_id = str(labware_id).replace(" ", "_")
+        well_count = len(wells)

+        node_id = str(uuid.uuid4())
+        set_liquid_index += 1

+        G.add_node(
+            node_id,
+            template_name="set_liquid_from_plate",
+            resource_name="liquid_handler.prcxi",
+            name=f"SetLiquid {set_liquid_index}",
+            description=f"Set liquid: {labware_id}",
+            lab_node_type="Reagent",
+            footer="set_liquid_from_plate-liquid_handler.prcxi",
+            device_name=DEVICE_NAME_DEFAULT,
+            type=NODE_TYPE_DEFAULT,
+            parent_uuid=set_liquid_group_id,  # points to the Group node
+            minimized=True,  # shown collapsed
+            param={
+                "plate": [],  # provided through the incoming connection
+                "well_names": wells,  # list of well names, e.g. ["A1", "A3", "A5"]
+                "liquid_names": [res_id] * well_count,
+                "volumes": [DEFAULT_LIQUID_VOLUME] * well_count,
+            },
+        )

+        # No ready connections are needed between set_liquid_from_plate nodes

+        # Material flow: create_resource's labware -> set_liquid_from_plate's input_plate
+        create_res_node_id = slot_to_create_resource.get(slot)
+        if create_res_node_id:
+            G.add_edge(create_res_node_id, node_id, source_port="labware", target_port="input_plate")

+        # The output_wells output of set_liquid_from_plate is used to connect transfer_liquid
+        resource_last_writer[labware_id] = f"{node_id}:output_wells"

+    # transfer_liquid nodes are chained via ready, starting from None
     last_control_node_id = None

+    # Port name mapping: JSON field name -> actual handle key
+    INPUT_PORT_MAPPING = {
+        "sources": "sources_identifier",
+        "targets": "targets_identifier",
+        "vessel": "vessel",
+        "to_vessel": "to_vessel",
+        "from_vessel": "from_vessel",
+        "reagent": "reagent",
+        "solvent": "solvent",
+        "compound": "compound",
+    }

+    OUTPUT_PORT_MAPPING = {
+        "sources": "sources_out",  # output ports are xxx_out
+        "targets": "targets_out",  # output ports are xxx_out
+        "vessel": "vessel_out",
+        "to_vessel": "to_vessel_out",
+        "from_vessel": "from_vessel_out",
+        "filtrate_vessel": "filtrate_out",
+        "reagent": "reagent",
+        "solvent": "solvent",
+        "compound": "compound",
+    }

+    # Parameters (plural form) that must be expanded to match the number of wells
+    EXPAND_BY_WELLS_PARAMS = ["asp_vols", "dis_vols", "asp_flow_rates", "dis_flow_rates"]

     # Process the protocol steps
     for step in protocol_steps:
         node_id = str(uuid.uuid4())
-        G.add_node(node_id, **step)
+        params = step.get("param", {}).copy()  # copy so the original data is not modified
+        connected_params = set()  # parameters that were satisfied by connections
+        warnings = []  # collected warning messages

+        # Parameter renaming: singular -> plural
+        for old_name, new_name in PARAM_RENAME_MAPPING.items():
+            if old_name in params:
+                params[new_name] = params.pop(old_name)

+        # Handle input connections
+        for param_key, target_port in INPUT_PORT_MAPPING.items():
+            resource_name = params.get(param_key)
+            if resource_name and resource_name in resource_last_writer:
+                source_node, source_port = resource_last_writer[resource_name].split(":")
+                G.add_edge(source_node, node_id, source_port=source_port, target_port=target_port)
+                connected_params.add(param_key)
+            elif resource_name and resource_name not in resource_last_writer:
+                # the resource name does not exist in labware_info
+                warnings.append(f"{param_key}={resource_name} not found")

+        # Get the number of wells for targets, used to expand parameters
+        targets_name = params.get("targets")
+        sources_name = params.get("sources")
+        targets_wells_count = 1
+        sources_wells_count = 1

+        if targets_name and targets_name in labware_info:
+            target_wells = labware_info[targets_name].get("well", [])
+            targets_wells_count = len(target_wells) if target_wells else 1
+        elif targets_name:
+            warnings.append(f"targets={targets_name} is not defined in reagent")

+        if sources_name and sources_name in labware_info:
+            source_wells = labware_info[sources_name].get("well", [])
+            sources_wells_count = len(source_wells) if source_wells else 1
+        elif sources_name:
+            warnings.append(f"sources={sources_name} is not defined in reagent")

+        # Check whether the well counts of sources and targets match
+        if targets_wells_count != sources_wells_count and targets_name and sources_name:
+            warnings.append(f"well count mismatch: sources={sources_wells_count}, targets={targets_wells_count}")

+        # Use the targets well count to expand parameters
+        wells_count = targets_wells_count

+        # Expand scalar parameters into arrays (based on the targets well count)
+        for expand_param in EXPAND_BY_WELLS_PARAMS:
+            if expand_param in params:
+                value = params[expand_param]
+                # If it is a single value, expand it into an array
+                if not isinstance(value, list):
+                    params[expand_param] = [value] * wells_count
+                # If it is already an array with the wrong length, record a warning
+                elif len(value) != wells_count:
+                    warnings.append(f"{expand_param} count ({len(value)}) does not match wells ({wells_count})")

+        # If sources/targets were passed via connections, replace the parameter value with an empty list
+        for param_key in connected_params:
+            if param_key in params:
+                params[param_key] = []

+        # Update the step's param, footer, device_name and type
+        step_copy = step.copy()
+        step_copy["param"] = params
+        step_copy["device_name"] = DEVICE_NAME_DEFAULT  # action nodes use the default device name
+        step_copy["type"] = NODE_TYPE_DEFAULT  # node type

+        # If there are warnings, prepend a warning marker to the footer
+        if warnings:
+            original_footer = step.get("footer", "")
+            step_copy["footer"] = f"[WARN: {'; '.join(warnings)}] {original_footer}"

+        G.add_node(node_id, **step_copy)

         # Control flow
         if last_control_node_id is not None:
             G.add_edge(last_control_node_id, node_id, source_port="ready", target_port="ready")
         last_control_node_id = node_id

-        # Material flow
-        params = step.get("param", {})
-        input_resources_possible_names = [
-            "vessel",
-            "to_vessel",
-            "from_vessel",
-            "reagent",
-            "solvent",
-            "compound",
-            "sources",
-            "targets",
-        ]
-
-        for target_port in input_resources_possible_names:
-            resource_name = params.get(target_port)
-            if resource_name and resource_name in resource_last_writer:
-                source_node, source_port = resource_last_writer[resource_name].split(":")
-                G.add_edge(source_node, node_id, source_port=source_port, target_port=target_port)
-
-        output_resources = {
-            "vessel_out": params.get("vessel"),
-            "from_vessel_out": params.get("from_vessel"),
-            "to_vessel_out": params.get("to_vessel"),
-            "filtrate_out": params.get("filtrate_vessel"),
-            "reagent": params.get("reagent"),
-            "solvent": params.get("solvent"),
-            "compound": params.get("compound"),
-            "sources_out": params.get("sources"),
-            "targets_out": params.get("targets"),
-        }
-
-        for source_port, resource_name in output_resources.items():
-            if resource_name:
-                resource_last_writer[resource_name] = f"{node_id}:{source_port}"
+        # Handle outputs: update resource_last_writer
+        for param_key, output_port in OUTPUT_PORT_MAPPING.items():
+            resource_name = step.get("param", {}).get(param_key)  # use the original parameter value
+            if resource_name:
+                resource_last_writer[resource_name] = f"{node_id}:{output_port}"

     return G

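The material-flow wiring above hinges on the resource_last_writer bookkeeping: whichever node last produced a named resource becomes the source of the next edge that consumes it. The following is a minimal, self-contained sketch of that idea only; all names and values are illustrative and this is not the library implementation.

# Illustrative sketch of the "last writer" wiring used above (hypothetical names).
resource_last_writer = {}  # resource name -> "node_id:port" of the node that last produced it
edges = []


def wire(step_node_id, resource_name, target_port):
    """Connect a step's input port to whichever node last wrote the named resource."""
    if resource_name in resource_last_writer:
        source_node, source_port = resource_last_writer[resource_name].split(":")
        edges.append((source_node, step_node_id, source_port, target_port))


resource_last_writer["WaterPlate"] = "set-liquid-1:output_wells"
wire("transfer-1", "WaterPlate", "sources_identifier")
print(edges)  # [('set-liquid-1', 'transfer-1', 'output_wells', 'sources_identifier')]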
@@ -1,21 +1,68 @@
 """
 JSON workflow conversion module

-Provides conversion from multiple JSON formats into the unified workflow format.
-Supported formats:
-1. workflow/reagent format
-2. steps_info/labware_info format
+Converts workflow/reagent-format JSON into the unified workflow format.
+
+Input format:
+{
+    "workflow": [
+        {"action": "...", "action_args": {...}},
+        ...
+    ],
+    "reagent": {
+        "reagent_name": {"slot": int, "well": [...], "labware": "..."},
+        ...
+    }
+}
 """

 import json
 from os import PathLike
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Set, Tuple, Union
+from typing import Any, Dict, List, Optional, Tuple, Union

 from unilabos.workflow.common import WorkflowGraph, build_protocol_graph
 from unilabos.registry.registry import lab_registry


+# ==================== Field mapping configuration ====================
+
+# Mapping from action to resource_name
+ACTION_RESOURCE_MAPPING: Dict[str, str] = {
+    # Biology operations
+    "transfer_liquid": "liquid_handler.prcxi",
+    "transfer": "liquid_handler.prcxi",
+    "incubation": "incubator.prcxi",
+    "move_labware": "labware_mover.prcxi",
+    "oscillation": "shaker.prcxi",
+    # Organic chemistry operations
+    "HeatChillToTemp": "heatchill.chemputer",
+    "StopHeatChill": "heatchill.chemputer",
+    "StartHeatChill": "heatchill.chemputer",
+    "HeatChill": "heatchill.chemputer",
+    "Dissolve": "stirrer.chemputer",
+    "Transfer": "liquid_handler.chemputer",
+    "Evaporate": "rotavap.chemputer",
+    "Recrystallize": "reactor.chemputer",
+    "Filter": "filter.chemputer",
+    "Dry": "dryer.chemputer",
+    "Add": "liquid_handler.chemputer",
+}
+
+# Mapping from action_args fields to parameters fields
+# Format: {"old_key": "new_key"}; only fields that need renaming are mapped
+ARGS_FIELD_MAPPING: Dict[str, str] = {
+    # Configure field renames here if needed
+    # "old_field_name": "new_field_name",
+}
+
+# Default workstation name
+DEFAULT_WORKSTATION = "PRCXI"
+
+
+# ==================== Core conversion functions ====================
+
+
 def get_action_handles(resource_name: str, template_name: str) -> Dict[str, List[str]]:
     """
     Get the handles configuration for the given device and action from the registry
@@ -39,12 +86,10 @@ def get_action_handles(resource_name: str, template_name: str) -> Dict[str, List
     handles = action_config.get("handles", {})

     if isinstance(handles, dict):
-        # Process input handles (used as target)
         for handle in handles.get("input", []):
             handler_key = handle.get("handler_key", "")
             if handler_key:
                 result["source"].append(handler_key)
-        # Process output handles (used as source)
         for handle in handles.get("output", []):
             handler_key = handle.get("handler_key", "")
             if handler_key:
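The lookup above walks a registry entry of the shape device -> class -> action_value_mappings -> action -> handles. The sketch below reproduces only that parsing over a hand-written dict; the handler keys and the registry shape beyond what the hunk shows are assumptions, not the real registry content.

# Hypothetical registry entry and the same input->source / output->target mapping as above.
device_info = {
    "class": {
        "action_value_mappings": {
            "transfer_liquid": {
                "handles": {
                    "input": [{"handler_key": "sources_identifier"}, {"handler_key": "targets_identifier"}],
                    "output": [{"handler_key": "sources_out"}, {"handler_key": "targets_out"}],
                }
            }
        }
    }
}

handles = device_info["class"]["action_value_mappings"]["transfer_liquid"]["handles"]
result = {
    "source": [h["handler_key"] for h in handles.get("input", []) if h.get("handler_key")],
    "target": [h["handler_key"] for h in handles.get("output", []) if h.get("handler_key")],
}
print(result)
# {'source': ['sources_identifier', 'targets_identifier'], 'target': ['sources_out', 'targets_out']}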
@@ -69,12 +114,9 @@ def validate_workflow_handles(graph: WorkflowGraph) -> Tuple[bool, List[str]]:
     for edge in graph.edges:
         left_uuid = edge.get("source")
         right_uuid = edge.get("target")
-        # target_handle_key is the target, i.e. the input port of the right node
-        # source_handle_key is the source, i.e. the output port of the left node
         right_source_conn_key = edge.get("target_handle_key", "")
         left_target_conn_key = edge.get("source_handle_key", "")

-        # Get the source and target node info
         left_node = nodes.get(left_uuid, {})
         right_node = nodes.get(right_uuid, {})

@@ -83,164 +125,93 @@ def validate_workflow_handles(graph: WorkflowGraph) -> Tuple[bool, List[str]]:
         right_res_name = right_node.get("resource_name", "")
         right_template_name = right_node.get("template_name", "")

-        # Get the source node's output handles
         left_node_handles = get_action_handles(left_res_name, left_template_name)
         target_valid_keys = left_node_handles.get("target", [])
         target_valid_keys.append("ready")

-        # Get the target node's input handles
         right_node_handles = get_action_handles(right_res_name, right_template_name)
         source_valid_keys = right_node_handles.get("source", [])
         source_valid_keys.append("ready")

-        # If the node has output handles configured, source_port must be valid
+        # Validate the target (right) node's input port
         if not right_source_conn_key:
-            node_name = left_node.get("name", left_uuid[:8])
-            errors.append(f"source node '{node_name}' has an empty source_handle_key, " f"expected one of: {source_valid_keys}")
+            node_name = right_node.get("name", right_uuid[:8])
+            errors.append(f"target node '{node_name}' has an empty input port (target_handle_key); expected one of: {source_valid_keys}")
         elif right_source_conn_key not in source_valid_keys:
-            node_name = left_node.get("name", left_uuid[:8])
+            node_name = right_node.get("name", right_uuid[:8])
             errors.append(
-                f"source node '{node_name}' has no source endpoint '{right_source_conn_key}', " f"supported endpoints: {source_valid_keys}"
+                f"target node '{node_name}' has no input port '{right_source_conn_key}'; supported input ports: {source_valid_keys}"
             )

-        # If the node has input handles configured, target_port must be valid
+        # Validate the source (left) node's output port
         if not left_target_conn_key:
-            node_name = right_node.get("name", right_uuid[:8])
-            errors.append(f"target node '{node_name}' has an empty target_handle_key, " f"expected one of: {target_valid_keys}")
+            node_name = left_node.get("name", left_uuid[:8])
+            errors.append(f"source node '{node_name}' has an empty output port (source_handle_key); expected one of: {target_valid_keys}")
         elif left_target_conn_key not in target_valid_keys:
-            node_name = right_node.get("name", right_uuid[:8])
+            node_name = left_node.get("name", left_uuid[:8])
             errors.append(
-                f"target node '{node_name}' has no target endpoint '{left_target_conn_key}', "
-                f"supported endpoints: {target_valid_keys}"
+                f"source node '{node_name}' has no output port '{left_target_conn_key}'; supported output ports: {target_valid_keys}"
             )

     return len(errors) == 0, errors
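A hedged usage sketch of the validator follows. The module path and file name are assumptions; only the function names, signatures and the (is_valid, errors) return shape shown in this diff are taken as given.

# Build first, then validate explicitly (illustrative path and file name).
from unilabos.workflow.convert_from_json import convert_from_json, validate_workflow_handles

graph = convert_from_json("protocol.json", validate=False)
is_valid, errors = validate_workflow_handles(graph)
if not is_valid:
    for message in errors:
        print("handle error:", message)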


-# Mapping from action to resource_name
-ACTION_RESOURCE_MAPPING: Dict[str, str] = {
-    # Biology operations
-    "transfer_liquid": "liquid_handler.prcxi",
-    "transfer": "liquid_handler.prcxi",
-    "incubation": "incubator.prcxi",
-    "move_labware": "labware_mover.prcxi",
-    "oscillation": "shaker.prcxi",
-    # Organic chemistry operations
-    "HeatChillToTemp": "heatchill.chemputer",
-    "StopHeatChill": "heatchill.chemputer",
-    "StartHeatChill": "heatchill.chemputer",
-    "HeatChill": "heatchill.chemputer",
-    "Dissolve": "stirrer.chemputer",
-    "Transfer": "liquid_handler.chemputer",
-    "Evaporate": "rotavap.chemputer",
-    "Recrystallize": "reactor.chemputer",
-    "Filter": "filter.chemputer",
-    "Dry": "dryer.chemputer",
-    "Add": "liquid_handler.chemputer",
-}
-
-
-def normalize_steps(data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+def normalize_workflow_steps(workflow: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
     """
-    Normalize step data in various formats into a unified format
+    Normalize step data in workflow format

-    Supported input formats:
-    - action + parameters
-    - action + action_args
-    - operation + parameters
+    Input format:
+    [{"action": "...", "action_args": {...}}, ...]
+
+    Output format:
+    [{"action": "...", "parameters": {...}, "step_number": int}, ...]

     Args:
-        data: list of raw step data
+        workflow: the workflow array

     Returns:
-        The normalized list of steps, in the form [{"action": str, "parameters": dict, "description": str?, "step_number": int?}, ...]
+        The normalized list of steps
     """
     normalized = []
-    for idx, step in enumerate(data):
-        # Get the action name (supports the action or operation field)
-        action = step.get("action") or step.get("operation")
+    for idx, step in enumerate(workflow):
+        action = step.get("action")
         if not action:
             continue

-        # Get the parameters (supports the parameters or action_args field)
-        raw_params = step.get("parameters") or step.get("action_args") or {}
-        params = dict(raw_params)
+        # Get the parameters: action_args
+        raw_params = step.get("action_args", {})
+        params = {}

-        # Normalize source/target -> sources/targets
-        if "source" in raw_params and "sources" not in raw_params:
-            params["sources"] = raw_params["source"]
-        if "target" in raw_params and "targets" not in raw_params:
-            params["targets"] = raw_params["target"]
+        # Apply the field mapping
+        for key, value in raw_params.items():
+            mapped_key = ARGS_FIELD_MAPPING.get(key, key)
+            params[mapped_key] = value

-        # Get the description (supports the description or purpose field)
-        description = step.get("description") or step.get("purpose")
-
-        # Get the step number (prefer step_number from the raw data, otherwise index + 1)
-        step_number = step.get("step_number", idx + 1)
-
-        step_dict = {"action": action, "parameters": params, "step_number": step_number}
-        if description:
-            step_dict["description"] = description
+        step_dict = {
+            "action": action,
+            "parameters": params,
+            "step_number": idx + 1,
+        }
+
+        # Keep the description field
+        if "description" in step:
+            step_dict["description"] = step["description"]

         normalized.append(step_dict)

     return normalized


-def normalize_labware(data: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
-    """
-    Normalize labware data in various formats into a unified dict format
-
-    Supported input formats:
-    - reagent_name + material_name + positions
-    - name + labware + slot
-
-    Args:
-        data: list of raw labware data
-
-    Returns:
-        The normalized labware dict, in the form {name: {"slot": int, "labware": str, "well": list, "type": str, "role": str, "name": str}, ...}
-    """
-    labware = {}
-    for item in data:
-        # Get the key (prefer reagent_name, then material_name or name)
-        reagent_name = item.get("reagent_name")
-        key = reagent_name or item.get("material_name") or item.get("name")
-        if not key:
-            continue
-
-        key = str(key)
-
-        # Handle duplicate keys by appending a suffix
-        idx = 1
-        original_key = key
-        while key in labware:
-            idx += 1
-            key = f"{original_key}_{idx}"
-
-        labware[key] = {
-            "slot": item.get("positions") or item.get("slot"),
-            "labware": item.get("material_name") or item.get("labware"),
-            "well": item.get("well", []),
-            "type": item.get("type", "reagent"),
-            "role": item.get("role", ""),
-            "name": key,
-        }
-
-    return labware
-
-
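A hedged example of what the new normalization produces. The module path is an assumption and the data is illustrative; with ARGS_FIELD_MAPPING left empty (as in this diff), action_args is copied through unchanged into parameters.

from unilabos.workflow.convert_from_json import normalize_workflow_steps

workflow = [
    {"action": "transfer_liquid", "action_args": {"sources": "WaterPlate", "targets": "SamplePlate"}},
    {"action": "incubation", "action_args": {"time": 600}, "description": "10 min rest"},
]
print(normalize_workflow_steps(workflow))
# [{'action': 'transfer_liquid', 'parameters': {'sources': 'WaterPlate', 'targets': 'SamplePlate'}, 'step_number': 1},
#  {'action': 'incubation', 'parameters': {'time': 600}, 'step_number': 2, 'description': '10 min rest'}]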
 def convert_from_json(
     data: Union[str, PathLike, Dict[str, Any]],
-    workstation_name: str = "PRCXi",
+    workstation_name: str = DEFAULT_WORKSTATION,
     validate: bool = True,
 ) -> WorkflowGraph:
     """
     Convert JSON data or a JSON file into a WorkflowGraph

-    Supported JSON formats:
-    1. {"workflow": [...], "reagent": {...}} - direct format
-    2. {"steps_info": [...], "labware_info": [...]} - format that needs normalization
+    JSON format:
+    {"workflow": [...], "reagent": {...}}

     Args:
         data: a JSON file path, a dict, or a JSON string
@@ -251,7 +222,7 @@ def convert_from_json(
         WorkflowGraph: the constructed workflow graph

     Raises:
-        ValueError: unsupported JSON format, or handle validation failed
+        ValueError: unsupported JSON format
         FileNotFoundError: the file does not exist
         json.JSONDecodeError: JSON parsing failed
     """
@@ -262,7 +233,6 @@ def convert_from_json(
             with path.open("r", encoding="utf-8") as fp:
                 json_data = json.load(fp)
         elif isinstance(data, str):
-            # Try to parse it as a JSON string
             json_data = json.loads(data)
         else:
             raise FileNotFoundError(f"File does not exist: {data}")
@@ -271,30 +241,24 @@ def convert_from_json(
     else:
         raise TypeError(f"Unsupported data type: {type(data)}")

-    # Parse the data according to its format
-    if "workflow" in json_data and "reagent" in json_data:
-        # Format 1: workflow/reagent (already the canonical format)
-        protocol_steps = json_data["workflow"]
-        labware_info = json_data["reagent"]
-    elif "steps_info" in json_data and "labware_info" in json_data:
-        # Format 2: steps_info/labware_info (needs normalization)
-        protocol_steps = normalize_steps(json_data["steps_info"])
-        labware_info = normalize_labware(json_data["labware_info"])
-    elif "steps" in json_data and "labware" in json_data:
-        # Format 3: steps/labware (another common format)
-        protocol_steps = normalize_steps(json_data["steps"])
-        if isinstance(json_data["labware"], list):
-            labware_info = normalize_labware(json_data["labware"])
-        else:
-            labware_info = json_data["labware"]
-    else:
+    # Validate the format
+    if "workflow" not in json_data or "reagent" not in json_data:
         raise ValueError(
-            "Unsupported JSON format. Supported formats:\n"
-            "1. {'workflow': [...], 'reagent': {...}}\n"
-            "2. {'steps_info': [...], 'labware_info': [...]}\n"
-            "3. {'steps': [...], 'labware': [...]}"
+            "Unsupported JSON format. Please use the standard format:\n"
+            '{"workflow": [{"action": "...", "action_args": {...}}, ...], '
+            '"reagent": {"name": {"slot": int, "well": [...], "labware": "..."}, ...}}'
         )

+    # Extract the data
+    workflow = json_data["workflow"]
+    reagent = json_data["reagent"]
+
+    # Normalize the step data
+    protocol_steps = normalize_workflow_steps(workflow)
+
+    # reagent is already a dict, use it directly
+    labware_info = reagent
+
     # Build the workflow graph
     graph = build_protocol_graph(
         labware_info=labware_info,
@@ -317,7 +281,7 @@ def convert_from_json(

 def convert_json_to_node_link(
     data: Union[str, PathLike, Dict[str, Any]],
-    workstation_name: str = "PRCXi",
+    workstation_name: str = DEFAULT_WORKSTATION,
 ) -> Dict[str, Any]:
     """
     Convert JSON data into a node-link format dict
@@ -335,7 +299,7 @@ def convert_json_to_node_link(

 def convert_json_to_workflow_list(
     data: Union[str, PathLike, Dict[str, Any]],
-    workstation_name: str = "PRCXi",
+    workstation_name: str = DEFAULT_WORKSTATION,
 ) -> List[Dict[str, Any]]:
     """
     Convert JSON data into the workflow list format
@@ -349,8 +313,3 @@ def convert_json_to_workflow_list(
     """
     graph = convert_from_json(data, workstation_name)
     return graph.to_dict()

-
-# Keep the underscore-prefixed aliases for backward compatibility
-_normalize_steps = normalize_steps
-_normalize_labware = normalize_labware
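A hedged end-to-end sketch of the single format the reworked module accepts. The module path, labware names and reagent layout are illustrative; the function names, argument defaults and the workflow/reagent structure are taken from this diff.

from unilabos.workflow.convert_from_json import convert_from_json, convert_json_to_workflow_list

payload = {
    "workflow": [
        {"action": "transfer_liquid", "action_args": {"sources": "WaterPlate", "targets": "SamplePlate"}},
    ],
    "reagent": {
        "WaterPlate": {"slot": 1, "well": ["A1"], "labware": "96_wellplate"},
        "SamplePlate": {"slot": 2, "well": ["A1"], "labware": "96_wellplate"},
    },
}

graph = convert_from_json(payload)              # WorkflowGraph, handle-validated by default
nodes = convert_json_to_workflow_list(payload)  # same conversion, returned as a plain list of node dicts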
 356  unilabos/workflow/legacy/convert_from_json_legacy.py  Normal file
@@ -0,0 +1,356 @@
"""
JSON workflow conversion module

Provides conversion from multiple JSON formats into the unified workflow format.
Supported formats:
1. workflow/reagent format
2. steps_info/labware_info format
"""

import json
from os import PathLike
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union

from unilabos.workflow.common import WorkflowGraph, build_protocol_graph
from unilabos.registry.registry import lab_registry


def get_action_handles(resource_name: str, template_name: str) -> Dict[str, List[str]]:
    """
    Get the handles configuration for the given device and action from the registry

    Args:
        resource_name: device resource name, e.g. "liquid_handler.prcxi"
        template_name: action template name, e.g. "transfer_liquid"

    Returns:
        A dict containing the source and target handler_keys:
        {"source": ["sources_out", "targets_out", ...], "target": ["sources", "targets", ...]}
    """
    result = {"source": [], "target": []}

    device_info = lab_registry.device_type_registry.get(resource_name, {})
    if not device_info:
        return result

    action_mappings = device_info.get("class", {}).get("action_value_mappings", {})
    action_config = action_mappings.get(template_name, {})
    handles = action_config.get("handles", {})

    if isinstance(handles, dict):
        # Process input handles (used as target)
        for handle in handles.get("input", []):
            handler_key = handle.get("handler_key", "")
            if handler_key:
                result["source"].append(handler_key)
        # Process output handles (used as source)
        for handle in handles.get("output", []):
            handler_key = handle.get("handler_key", "")
            if handler_key:
                result["target"].append(handler_key)

    return result


def validate_workflow_handles(graph: WorkflowGraph) -> Tuple[bool, List[str]]:
    """
    Validate that the handle configuration of every edge in the workflow graph is correct

    Args:
        graph: the workflow graph object

    Returns:
        (is_valid, errors): whether the graph is valid, and the list of error messages
    """
    errors = []
    nodes = graph.nodes

    for edge in graph.edges:
        left_uuid = edge.get("source")
        right_uuid = edge.get("target")
        # target_handle_key is the target, i.e. the input port of the right node
        # source_handle_key is the source, i.e. the output port of the left node
        right_source_conn_key = edge.get("target_handle_key", "")
        left_target_conn_key = edge.get("source_handle_key", "")

        # Get the source and target node info
        left_node = nodes.get(left_uuid, {})
        right_node = nodes.get(right_uuid, {})

        left_res_name = left_node.get("resource_name", "")
        left_template_name = left_node.get("template_name", "")
        right_res_name = right_node.get("resource_name", "")
        right_template_name = right_node.get("template_name", "")

        # Get the source node's output handles
        left_node_handles = get_action_handles(left_res_name, left_template_name)
        target_valid_keys = left_node_handles.get("target", [])
        target_valid_keys.append("ready")

        # Get the target node's input handles
        right_node_handles = get_action_handles(right_res_name, right_template_name)
        source_valid_keys = right_node_handles.get("source", [])
        source_valid_keys.append("ready")

        # If the node has output handles configured, source_port must be valid
        if not right_source_conn_key:
            node_name = left_node.get("name", left_uuid[:8])
            errors.append(f"source node '{node_name}' has an empty source_handle_key, " f"expected one of: {source_valid_keys}")
        elif right_source_conn_key not in source_valid_keys:
            node_name = left_node.get("name", left_uuid[:8])
            errors.append(
                f"source node '{node_name}' has no source endpoint '{right_source_conn_key}', " f"supported endpoints: {source_valid_keys}"
            )

        # If the node has input handles configured, target_port must be valid
        if not left_target_conn_key:
            node_name = right_node.get("name", right_uuid[:8])
            errors.append(f"target node '{node_name}' has an empty target_handle_key, " f"expected one of: {target_valid_keys}")
        elif left_target_conn_key not in target_valid_keys:
            node_name = right_node.get("name", right_uuid[:8])
            errors.append(
                f"target node '{node_name}' has no target endpoint '{left_target_conn_key}', "
                f"supported endpoints: {target_valid_keys}"
            )

    return len(errors) == 0, errors


# Mapping from action to resource_name
ACTION_RESOURCE_MAPPING: Dict[str, str] = {
    # Biology operations
    "transfer_liquid": "liquid_handler.prcxi",
    "transfer": "liquid_handler.prcxi",
    "incubation": "incubator.prcxi",
    "move_labware": "labware_mover.prcxi",
    "oscillation": "shaker.prcxi",
    # Organic chemistry operations
    "HeatChillToTemp": "heatchill.chemputer",
    "StopHeatChill": "heatchill.chemputer",
    "StartHeatChill": "heatchill.chemputer",
    "HeatChill": "heatchill.chemputer",
    "Dissolve": "stirrer.chemputer",
    "Transfer": "liquid_handler.chemputer",
    "Evaporate": "rotavap.chemputer",
    "Recrystallize": "reactor.chemputer",
    "Filter": "filter.chemputer",
    "Dry": "dryer.chemputer",
    "Add": "liquid_handler.chemputer",
}


def normalize_steps(data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """
    Normalize step data in various formats into a unified format

    Supported input formats:
    - action + parameters
    - action + action_args
    - operation + parameters

    Args:
        data: list of raw step data

    Returns:
        The normalized list of steps, in the form [{"action": str, "parameters": dict, "description": str?, "step_number": int?}, ...]
    """
    normalized = []
    for idx, step in enumerate(data):
        # Get the action name (supports the action or operation field)
        action = step.get("action") or step.get("operation")
        if not action:
            continue

        # Get the parameters (supports the parameters or action_args field)
        raw_params = step.get("parameters") or step.get("action_args") or {}
        params = dict(raw_params)

        # Normalize source/target -> sources/targets
        if "source" in raw_params and "sources" not in raw_params:
            params["sources"] = raw_params["source"]
        if "target" in raw_params and "targets" not in raw_params:
            params["targets"] = raw_params["target"]

        # Get the description (supports the description or purpose field)
        description = step.get("description") or step.get("purpose")

        # Get the step number (prefer step_number from the raw data, otherwise index + 1)
        step_number = step.get("step_number", idx + 1)

        step_dict = {"action": action, "parameters": params, "step_number": step_number}
        if description:
            step_dict["description"] = description

        normalized.append(step_dict)

    return normalized


def normalize_labware(data: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
    """
    Normalize labware data in various formats into a unified dict format

    Supported input formats:
    - reagent_name + material_name + positions
    - name + labware + slot

    Args:
        data: list of raw labware data

    Returns:
        The normalized labware dict, in the form {name: {"slot": int, "labware": str, "well": list, "type": str, "role": str, "name": str}, ...}
    """
    labware = {}
    for item in data:
        # Get the key (prefer reagent_name, then material_name or name)
        reagent_name = item.get("reagent_name")
        key = reagent_name or item.get("material_name") or item.get("name")
        if not key:
            continue

        key = str(key)

        # Handle duplicate keys by appending a suffix
        idx = 1
        original_key = key
        while key in labware:
            idx += 1
            key = f"{original_key}_{idx}"

        labware[key] = {
            "slot": item.get("positions") or item.get("slot"),
            "labware": item.get("material_name") or item.get("labware"),
            "well": item.get("well", []),
            "type": item.get("type", "reagent"),
            "role": item.get("role", ""),
            "name": key,
        }

    return labware


def convert_from_json(
    data: Union[str, PathLike, Dict[str, Any]],
    workstation_name: str = "PRCXi",
    validate: bool = True,
) -> WorkflowGraph:
    """
    Convert JSON data or a JSON file into a WorkflowGraph

    Supported JSON formats:
    1. {"workflow": [...], "reagent": {...}} - direct format
    2. {"steps_info": [...], "labware_info": [...]} - format that needs normalization

    Args:
        data: a JSON file path, a dict, or a JSON string
        workstation_name: workstation name, defaults to "PRCXi"
        validate: whether to validate the handle configuration, defaults to True

    Returns:
        WorkflowGraph: the constructed workflow graph

    Raises:
        ValueError: unsupported JSON format, or handle validation failed
        FileNotFoundError: the file does not exist
        json.JSONDecodeError: JSON parsing failed
    """
    # Handle the input data
    if isinstance(data, (str, PathLike)):
        path = Path(data)
        if path.exists():
            with path.open("r", encoding="utf-8") as fp:
                json_data = json.load(fp)
        elif isinstance(data, str):
            # Try to parse it as a JSON string
            json_data = json.loads(data)
        else:
            raise FileNotFoundError(f"File does not exist: {data}")
    elif isinstance(data, dict):
        json_data = data
    else:
        raise TypeError(f"Unsupported data type: {type(data)}")

    # Parse the data according to its format
    if "workflow" in json_data and "reagent" in json_data:
        # Format 1: workflow/reagent (already the canonical format)
        protocol_steps = json_data["workflow"]
        labware_info = json_data["reagent"]
    elif "steps_info" in json_data and "labware_info" in json_data:
        # Format 2: steps_info/labware_info (needs normalization)
        protocol_steps = normalize_steps(json_data["steps_info"])
        labware_info = normalize_labware(json_data["labware_info"])
    elif "steps" in json_data and "labware" in json_data:
        # Format 3: steps/labware (another common format)
        protocol_steps = normalize_steps(json_data["steps"])
        if isinstance(json_data["labware"], list):
            labware_info = normalize_labware(json_data["labware"])
        else:
            labware_info = json_data["labware"]
    else:
        raise ValueError(
            "Unsupported JSON format. Supported formats:\n"
            "1. {'workflow': [...], 'reagent': {...}}\n"
            "2. {'steps_info': [...], 'labware_info': [...]}\n"
            "3. {'steps': [...], 'labware': [...]}"
        )

    # Build the workflow graph
    graph = build_protocol_graph(
        labware_info=labware_info,
        protocol_steps=protocol_steps,
        workstation_name=workstation_name,
        action_resource_mapping=ACTION_RESOURCE_MAPPING,
    )

    # Validate the handle configuration
    if validate:
        is_valid, errors = validate_workflow_handles(graph)
        if not is_valid:
            import warnings

            for error in errors:
                warnings.warn(f"Handle validation warning: {error}")

    return graph


def convert_json_to_node_link(
    data: Union[str, PathLike, Dict[str, Any]],
    workstation_name: str = "PRCXi",
) -> Dict[str, Any]:
    """
    Convert JSON data into a node-link format dict

    Args:
        data: a JSON file path, a dict, or a JSON string
        workstation_name: workstation name, defaults to "PRCXi"

    Returns:
        Dict: workflow data in node-link format
    """
    graph = convert_from_json(data, workstation_name)
    return graph.to_node_link_dict()


def convert_json_to_workflow_list(
    data: Union[str, PathLike, Dict[str, Any]],
    workstation_name: str = "PRCXi",
) -> List[Dict[str, Any]]:
    """
    Convert JSON data into the workflow list format

    Args:
        data: a JSON file path, a dict, or a JSON string
        workstation_name: workstation name, defaults to "PRCXi"

    Returns:
        List: the list of workflow nodes
    """
    graph = convert_from_json(data, workstation_name)
    return graph.to_dict()


# Keep the underscore-prefixed aliases for backward compatibility
_normalize_steps = normalize_steps
_normalize_labware = normalize_labware
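A hedged sketch of the multi-format normalization that this legacy module keeps. The import path follows the file path above; the field names match the legacy normalizers, but the concrete values are illustrative.

from unilabos.workflow.legacy.convert_from_json_legacy import normalize_labware, normalize_steps

labware_info = [
    {"reagent_name": "Water", "material_name": "96_wellplate", "positions": 1, "well": ["A1"]},
]
steps_info = [
    {"operation": "Add", "parameters": {"reagent": "Water", "vessel": "reactor"}, "purpose": "add solvent"},
]

print(normalize_labware(labware_info))
# {'Water': {'slot': 1, 'labware': '96_wellplate', 'well': ['A1'], 'type': 'reagent', 'role': '', 'name': 'Water'}}
print(normalize_steps(steps_info))
# [{'action': 'Add', 'parameters': {'reagent': 'Water', 'vessel': 'reactor'}, 'step_number': 1, 'description': 'add solvent'}]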
@@ -2,7 +2,7 @@
 <?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
 <package format="3">
   <name>unilabos_msgs</name>
-  <version>0.10.15</version>
+  <version>0.10.17</version>
   <description>ROS2 Messages package for unilabos devices</description>
   <maintainer email="changjh@pku.edu.cn">Junhan Chang</maintainer>
   <maintainer email="18435084+Xuwznln@users.noreply.github.com">Xuwznln</maintainer>