diff --git a/.conda/base/recipe.yaml b/.conda/base/recipe.yaml new file mode 100644 index 0000000..3c5bb88 --- /dev/null +++ b/.conda/base/recipe.yaml @@ -0,0 +1,60 @@ +# unilabos: Production package (depends on unilabos-env + pip unilabos) +# For production deployment + +package: + name: unilabos + version: 0.10.17 + +source: + path: ../../unilabos + target_directory: unilabos + +build: + python: + entry_points: + - unilab = unilabos.app.main:main + script: + - set PIP_NO_INDEX= + - if: win + then: + - copy %RECIPE_DIR%\..\..\MANIFEST.in %SRC_DIR% + - copy %RECIPE_DIR%\..\..\setup.cfg %SRC_DIR% + - copy %RECIPE_DIR%\..\..\setup.py %SRC_DIR% + - pip install %SRC_DIR% + - if: unix + then: + - cp $RECIPE_DIR/../../MANIFEST.in $SRC_DIR + - cp $RECIPE_DIR/../../setup.cfg $SRC_DIR + - cp $RECIPE_DIR/../../setup.py $SRC_DIR + - pip install $SRC_DIR + +requirements: + host: + - python ==3.11.14 + - pip + - setuptools + - zstd + - zstandard + run: + - zstd + - zstandard + - networkx + - typing_extensions + - websockets + - pint + - fastapi + - jinja2 + - requests + - uvicorn + - opcua + - pyserial + - pandas + - pymodbus + - matplotlib + - pylibftdi + - uni-lab::unilabos-env ==0.10.17 + +about: + repository: https://github.com/deepmodeling/Uni-Lab-OS + license: GPL-3.0-only + description: "UniLabOS - Production package with minimal ROS2 dependencies" diff --git a/.conda/environment/recipe.yaml b/.conda/environment/recipe.yaml new file mode 100644 index 0000000..3f8df0f --- /dev/null +++ b/.conda/environment/recipe.yaml @@ -0,0 +1,39 @@ +# unilabos-env: conda environment dependencies (ROS2 + conda packages) + +package: + name: unilabos-env + version: 0.10.17 + +build: + noarch: generic + +requirements: + run: + # Python + - zstd + - zstandard + - conda-forge::python ==3.11.14 + - conda-forge::opencv + # ROS2 dependencies (from ci-check.yml) + - robostack-staging::ros-humble-ros-core + - robostack-staging::ros-humble-action-msgs + - robostack-staging::ros-humble-std-msgs + - robostack-staging::ros-humble-geometry-msgs + - robostack-staging::ros-humble-control-msgs + - robostack-staging::ros-humble-nav2-msgs + - robostack-staging::ros-humble-cv-bridge + - robostack-staging::ros-humble-vision-opencv + - robostack-staging::ros-humble-tf-transformations + - robostack-staging::ros-humble-moveit-msgs + - robostack-staging::ros-humble-tf2-ros + - robostack-staging::ros-humble-tf2-ros-py + - conda-forge::transforms3d + - conda-forge::uv + + # UniLabOS custom messages + - uni-lab::ros-humble-unilabos-msgs + +about: + repository: https://github.com/deepmodeling/Uni-Lab-OS + license: GPL-3.0-only + description: "UniLabOS Environment - ROS2 and conda dependencies" diff --git a/.conda/full/recipe.yaml b/.conda/full/recipe.yaml new file mode 100644 index 0000000..037f5b4 --- /dev/null +++ b/.conda/full/recipe.yaml @@ -0,0 +1,42 @@ +# unilabos-full: Full package with all features +# Depends on unilabos + complete ROS2 desktop + dev tools + +package: + name: unilabos-full + version: 0.10.17 + +build: + noarch: generic + +requirements: + run: + # Base unilabos package (includes unilabos-env) + - uni-lab::unilabos ==0.10.17 + # Documentation tools + - sphinx + - sphinx_rtd_theme + # Web UI + - gradio + - flask + # Interactive development + - ipython + - jupyter + - jupyros + - colcon-common-extensions + # ROS2 full desktop (includes rviz2, gazebo, etc.) 
+ - robostack-staging::ros-humble-desktop-full + # Navigation and motion control + - ros-humble-navigation2 + - ros-humble-ros2-control + - ros-humble-robot-state-publisher + - ros-humble-joint-state-publisher + # MoveIt motion planning + - ros-humble-moveit + - ros-humble-moveit-servo + # Simulation + - ros-humble-simulation + +about: + repository: https://github.com/deepmodeling/Uni-Lab-OS + license: GPL-3.0-only + description: "UniLabOS Full - Complete package with ROS2 Desktop, MoveIt, Navigation2, Gazebo, Jupyter" diff --git a/.conda/recipe.yaml b/.conda/recipe.yaml deleted file mode 100644 index 2b041c8..0000000 --- a/.conda/recipe.yaml +++ /dev/null @@ -1,91 +0,0 @@ -package: - name: unilabos - version: 0.10.15 - -source: - path: ../unilabos - target_directory: unilabos - -build: - python: - entry_points: - - unilab = unilabos.app.main:main - script: - - set PIP_NO_INDEX= - - if: win - then: - - copy %RECIPE_DIR%\..\MANIFEST.in %SRC_DIR% - - copy %RECIPE_DIR%\..\setup.cfg %SRC_DIR% - - copy %RECIPE_DIR%\..\setup.py %SRC_DIR% - - call %PYTHON% -m pip install %SRC_DIR% - - if: unix - then: - - cp $RECIPE_DIR/../MANIFEST.in $SRC_DIR - - cp $RECIPE_DIR/../setup.cfg $SRC_DIR - - cp $RECIPE_DIR/../setup.py $SRC_DIR - - $PYTHON -m pip install $SRC_DIR - -requirements: - host: - - python ==3.11.11 - - pip - - setuptools - - zstd - - zstandard - run: - - conda-forge::python ==3.11.11 - - compilers - - cmake - - zstd - - zstandard - - ninja - - if: unix - then: - - make - - sphinx - - sphinx_rtd_theme - - numpy - - scipy - - pandas - - networkx - - matplotlib - - pint - - pyserial - - pyusb - - pylibftdi - - pymodbus - - python-can - - pyvisa - - opencv - - pydantic - - fastapi - - uvicorn - - gradio - - flask - - websockets - - ipython - - jupyter - - jupyros - - colcon-common-extensions - - robostack-staging::ros-humble-desktop-full - - robostack-staging::ros-humble-control-msgs - - robostack-staging::ros-humble-sensor-msgs - - robostack-staging::ros-humble-trajectory-msgs - - ros-humble-navigation2 - - ros-humble-ros2-control - - ros-humble-robot-state-publisher - - ros-humble-joint-state-publisher - - ros-humble-rosbridge-server - - ros-humble-cv-bridge - - ros-humble-tf2 - - ros-humble-moveit - - ros-humble-moveit-servo - - ros-humble-simulation - - ros-humble-tf-transformations - - transforms3d - - uni-lab::ros-humble-unilabos-msgs - -about: - repository: https://github.com/deepmodeling/Uni-Lab-OS - license: GPL-3.0-only - description: "Uni-Lab-OS" diff --git a/.conda/scripts/post-link.bat b/.conda/scripts/post-link.bat deleted file mode 100644 index 352b78c..0000000 --- a/.conda/scripts/post-link.bat +++ /dev/null @@ -1,9 +0,0 @@ -@echo off -setlocal enabledelayedexpansion - -REM upgrade pip -"%PREFIX%\python.exe" -m pip install --upgrade pip - -REM install extra deps -"%PREFIX%\python.exe" -m pip install paho-mqtt opentrons_shared_data -"%PREFIX%\python.exe" -m pip install git+https://github.com/Xuwznln/pylabrobot.git diff --git a/.conda/scripts/post-link.sh b/.conda/scripts/post-link.sh deleted file mode 100644 index ef96f15..0000000 --- a/.conda/scripts/post-link.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash -set -euxo pipefail - -# make sure pip is available -"$PREFIX/bin/python" -m pip install --upgrade pip - -# install extra deps -"$PREFIX/bin/python" -m pip install paho-mqtt opentrons_shared_data -"$PREFIX/bin/python" -m pip install git+https://github.com/Xuwznln/pylabrobot.git diff --git a/.cursorignore b/.cursorignore deleted file mode 100644 index 7b0d4f9..0000000 --- 
a/.cursorignore +++ /dev/null @@ -1,26 +0,0 @@ -.conda -# .github -.idea -# .vscode -output -pylabrobot_repo -recipes -scripts -service -temp -# unilabos/test -# unilabos/app/web -unilabos/device_mesh -unilabos_data -unilabos_msgs -unilabos.egg-info -CONTRIBUTORS -# LICENSE -MANIFEST.in -pyrightconfig.json -# README.md -# README_zh.md -setup.py -setup.cfg -.gitattrubutes -**/__pycache__ diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..20a5faa --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,19 @@ +version: 2 +updates: +# GitHub Actions +- package-ecosystem: "github-actions" + directory: "/" + target-branch: "dev" + schedule: + interval: "weekly" + day: "monday" + time: "06:00" + open-pull-requests-limit: 5 + reviewers: + - "msgcenterpy-team" + labels: + - "dependencies" + - "github-actions" + commit-message: + prefix: "ci" + include: "scope" diff --git a/.github/workflows/ci-check.yml b/.github/workflows/ci-check.yml new file mode 100644 index 0000000..57245d9 --- /dev/null +++ b/.github/workflows/ci-check.yml @@ -0,0 +1,67 @@ +name: CI Check + +on: + push: + branches: [main, dev] + pull_request: + branches: [main, dev] + +jobs: + registry-check: + runs-on: windows-latest + + env: + # Fix Unicode encoding issue on Windows runner (cp1252 -> utf-8) + PYTHONIOENCODING: utf-8 + PYTHONUTF8: 1 + + defaults: + run: + shell: cmd + + steps: + - uses: actions/checkout@v6 + with: + fetch-depth: 0 + + - name: Setup Miniforge + uses: conda-incubator/setup-miniconda@v3 + with: + miniforge-version: latest + use-mamba: true + channels: robostack-staging,conda-forge,uni-lab + channel-priority: flexible + activate-environment: check-env + auto-update-conda: false + show-channel-urls: true + + - name: Install ROS dependencies, uv and unilabos-msgs + run: | + echo Installing ROS dependencies... + mamba install -n check-env conda-forge::uv conda-forge::opencv robostack-staging::ros-humble-ros-core robostack-staging::ros-humble-action-msgs robostack-staging::ros-humble-std-msgs robostack-staging::ros-humble-geometry-msgs robostack-staging::ros-humble-control-msgs robostack-staging::ros-humble-nav2-msgs uni-lab::ros-humble-unilabos-msgs robostack-staging::ros-humble-cv-bridge robostack-staging::ros-humble-vision-opencv robostack-staging::ros-humble-tf-transformations robostack-staging::ros-humble-moveit-msgs robostack-staging::ros-humble-tf2-ros robostack-staging::ros-humble-tf2-ros-py conda-forge::transforms3d -c robostack-staging -c conda-forge -c uni-lab -y + + - name: Install pip dependencies and unilabos + run: | + call conda activate check-env + echo Installing pip dependencies... + uv pip install -r unilabos/utils/requirements.txt + uv pip install pywinauto git+https://github.com/Xuwznln/pylabrobot.git + uv pip uninstall enum34 || echo enum34 not installed, skipping + uv pip install . + + - name: Run check mode (complete_registry) + run: | + call conda activate check-env + echo Running check mode... + python -m unilabos --check_mode --skip_env_check + + - name: Check for uncommitted changes + shell: bash + run: | + if ! 
git diff --exit-code; then + echo "::error::检测到文件变化!请先在本地运行 'python -m unilabos --complete_registry' 并提交变更" + echo "变化的文件:" + git diff --name-only + exit 1 + fi + echo "检查通过:无文件变化" diff --git a/.github/workflows/conda-pack-build.yml b/.github/workflows/conda-pack-build.yml index 3a379fa..ed45db9 100644 --- a/.github/workflows/conda-pack-build.yml +++ b/.github/workflows/conda-pack-build.yml @@ -13,6 +13,11 @@ on: required: false default: 'win-64' type: string + build_full: + description: '是否构建完整版 unilabos-full (默认构建轻量版 unilabos)' + required: false + default: false + type: boolean jobs: build-conda-pack: @@ -57,7 +62,7 @@ jobs: echo "should_build=false" >> $GITHUB_OUTPUT fi - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 if: steps.should_build.outputs.should_build == 'true' with: ref: ${{ github.event.inputs.branch }} @@ -69,7 +74,7 @@ jobs: with: miniforge-version: latest use-mamba: true - python-version: '3.11.11' + python-version: '3.11.14' channels: conda-forge,robostack-staging,uni-lab,defaults channel-priority: flexible activate-environment: unilab @@ -81,7 +86,14 @@ jobs: run: | echo Installing unilabos and dependencies to unilab environment... echo Using mamba for faster and more reliable dependency resolution... - mamba install -n unilab uni-lab::unilabos conda-pack -c uni-lab -c robostack-staging -c conda-forge -y + echo Build full: ${{ github.event.inputs.build_full }} + if "${{ github.event.inputs.build_full }}"=="true" ( + echo Installing unilabos-full ^(complete package^)... + mamba install -n unilab uni-lab::unilabos-full conda-pack -c uni-lab -c robostack-staging -c conda-forge -y + ) else ( + echo Installing unilabos ^(minimal package^)... + mamba install -n unilab uni-lab::unilabos conda-pack -c uni-lab -c robostack-staging -c conda-forge -y + ) - name: Install conda-pack, unilabos and dependencies (Unix) if: steps.should_build.outputs.should_build == 'true' && matrix.platform != 'win-64' @@ -89,7 +101,14 @@ jobs: run: | echo "Installing unilabos and dependencies to unilab environment..." echo "Using mamba for faster and more reliable dependency resolution..." - mamba install -n unilab uni-lab::unilabos conda-pack -c uni-lab -c robostack-staging -c conda-forge -y + echo "Build full: ${{ github.event.inputs.build_full }}" + if [[ "${{ github.event.inputs.build_full }}" == "true" ]]; then + echo "Installing unilabos-full (complete package)..." + mamba install -n unilab uni-lab::unilabos-full conda-pack -c uni-lab -c robostack-staging -c conda-forge -y + else + echo "Installing unilabos (minimal package)..." + mamba install -n unilab uni-lab::unilabos conda-pack -c uni-lab -c robostack-staging -c conda-forge -y + fi - name: Get latest ros-humble-unilabos-msgs version (Windows) if: steps.should_build.outputs.should_build == 'true' && matrix.platform == 'win-64' @@ -293,7 +312,7 @@ jobs: - name: Upload distribution package if: steps.should_build.outputs.should_build == 'true' - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: unilab-pack-${{ matrix.platform }}-${{ github.event.inputs.branch }} path: dist-package/ @@ -308,7 +327,12 @@ jobs: echo ========================================== echo Platform: ${{ matrix.platform }} echo Branch: ${{ github.event.inputs.branch }} - echo Python version: 3.11.11 + echo Python version: 3.11.14 + if "${{ github.event.inputs.build_full }}"=="true" ( + echo Package: unilabos-full ^(complete^) + ) else ( + echo Package: unilabos ^(minimal^) + ) echo. 
echo Distribution package contents: dir dist-package @@ -328,7 +352,12 @@ jobs: echo "==========================================" echo "Platform: ${{ matrix.platform }}" echo "Branch: ${{ github.event.inputs.branch }}" - echo "Python version: 3.11.11" + echo "Python version: 3.11.14" + if [[ "${{ github.event.inputs.build_full }}" == "true" ]]; then + echo "Package: unilabos-full (complete)" + else + echo "Package: unilabos (minimal)" + fi echo "" echo "Distribution package contents:" ls -lh dist-package/ diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml index 66aef8d..f3ac4d1 100644 --- a/.github/workflows/deploy-docs.yml +++ b/.github/workflows/deploy-docs.yml @@ -1,10 +1,12 @@ name: Deploy Docs on: - push: - branches: [main] - pull_request: + # 在 CI Check 成功后自动触发(仅 main 分支) + workflow_run: + workflows: ["CI Check"] + types: [completed] branches: [main] + # 手动触发 workflow_dispatch: inputs: branch: @@ -33,12 +35,19 @@ concurrency: jobs: # Build documentation build: + # 只在以下情况运行: + # 1. workflow_run 触发且 CI Check 成功 + # 2. 手动触发 + if: | + github.event_name == 'workflow_dispatch' || + (github.event_name == 'workflow_run' && github.event.workflow_run.conclusion == 'success') runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v6 with: - ref: ${{ github.event.inputs.branch || github.ref }} + # workflow_run 时使用触发工作流的分支,手动触发时使用输入的分支 + ref: ${{ github.event.workflow_run.head_branch || github.event.inputs.branch || github.ref }} fetch-depth: 0 - name: Setup Miniforge (with mamba) @@ -46,7 +55,7 @@ jobs: with: miniforge-version: latest use-mamba: true - python-version: '3.11.11' + python-version: '3.11.14' channels: conda-forge,robostack-staging,uni-lab,defaults channel-priority: flexible activate-environment: unilab @@ -75,8 +84,10 @@ jobs: - name: Setup Pages id: pages - uses: actions/configure-pages@v4 - if: github.ref == 'refs/heads/main' || (github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true') + uses: actions/configure-pages@v5 + if: | + github.event.workflow_run.head_branch == 'main' || + (github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true') - name: Build Sphinx documentation run: | @@ -94,14 +105,18 @@ jobs: test -f docs/_build/html/index.html && echo "✓ index.html exists" || echo "✗ index.html missing" - name: Upload build artifacts - uses: actions/upload-pages-artifact@v3 - if: github.ref == 'refs/heads/main' || (github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true') + uses: actions/upload-pages-artifact@v4 + if: | + github.event.workflow_run.head_branch == 'main' || + (github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true') with: path: docs/_build/html # Deploy to GitHub Pages deploy: - if: github.ref == 'refs/heads/main' || (github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true') + if: | + github.event.workflow_run.head_branch == 'main' || + (github.event_name == 'workflow_dispatch' && github.event.inputs.deploy_to_pages == 'true') environment: name: github-pages url: ${{ steps.deployment.outputs.page_url }} diff --git a/.github/workflows/multi-platform-build.yml b/.github/workflows/multi-platform-build.yml index bcba6db..4e1cf4f 100644 --- a/.github/workflows/multi-platform-build.yml +++ b/.github/workflows/multi-platform-build.yml @@ -1,11 +1,16 @@ name: Multi-Platform Conda Build on: + # 在 CI Check 工作流完成后触发(仅限 main/dev 分支) 
+ workflow_run: + workflows: ["CI Check"] + types: + - completed + branches: [main, dev] + # 支持 tag 推送(不依赖 CI Check) push: - branches: [main, dev] tags: ['v*'] - pull_request: - branches: [main, dev] + # 手动触发 workflow_dispatch: inputs: platforms: @@ -17,9 +22,37 @@ on: required: false default: false type: boolean + skip_ci_check: + description: '跳过等待 CI Check (手动触发时可选)' + required: false + default: false + type: boolean jobs: + # 等待 CI Check 完成的 job (仅用于 workflow_run 触发) + wait-for-ci: + runs-on: ubuntu-latest + if: github.event_name == 'workflow_run' + outputs: + should_continue: ${{ steps.check.outputs.should_continue }} + steps: + - name: Check CI status + id: check + run: | + if [[ "${{ github.event.workflow_run.conclusion }}" == "success" ]]; then + echo "should_continue=true" >> $GITHUB_OUTPUT + echo "CI Check passed, proceeding with build" + else + echo "should_continue=false" >> $GITHUB_OUTPUT + echo "CI Check did not succeed (status: ${{ github.event.workflow_run.conclusion }}), skipping build" + fi + build: + needs: [wait-for-ci] + # 运行条件:workflow_run 触发且 CI 成功,或者其他触发方式 + if: | + always() && + (needs.wait-for-ci.result == 'skipped' || needs.wait-for-ci.outputs.should_continue == 'true') strategy: fail-fast: false matrix: @@ -44,8 +77,10 @@ jobs: shell: bash -l {0} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: + # 如果是 workflow_run 触发,使用触发 CI Check 的 commit + ref: ${{ github.event.workflow_run.head_sha || github.ref }} fetch-depth: 0 - name: Check if platform should be built @@ -69,7 +104,6 @@ jobs: channels: conda-forge,robostack-staging,defaults channel-priority: strict activate-environment: build-env - auto-activate-base: false auto-update-conda: false show-channel-urls: true @@ -115,7 +149,7 @@ jobs: - name: Upload conda package artifacts if: steps.should_build.outputs.should_build == 'true' - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: conda-package-${{ matrix.platform }} path: conda-packages-temp diff --git a/.github/workflows/unilabos-conda-build.yml b/.github/workflows/unilabos-conda-build.yml index 214f9bf..d116a67 100644 --- a/.github/workflows/unilabos-conda-build.yml +++ b/.github/workflows/unilabos-conda-build.yml @@ -1,25 +1,62 @@ name: UniLabOS Conda Build on: + # 在 CI Check 成功后自动触发 + workflow_run: + workflows: ["CI Check"] + types: [completed] + branches: [main, dev] + # 标签推送时直接触发(发布版本) push: - branches: [main, dev] tags: ['v*'] - pull_request: - branches: [main, dev] + # 手动触发 workflow_dispatch: inputs: platforms: description: '选择构建平台 (逗号分隔): linux-64, osx-64, osx-arm64, win-64' required: false default: 'linux-64' + build_full: + description: '是否构建 unilabos-full 完整包 (默认只构建 unilabos 基础包)' + required: false + default: false + type: boolean upload_to_anaconda: description: '是否上传到Anaconda.org' required: false default: false type: boolean + skip_ci_check: + description: '跳过等待 CI Check (手动触发时可选)' + required: false + default: false + type: boolean jobs: + # 等待 CI Check 完成的 job (仅用于 workflow_run 触发) + wait-for-ci: + runs-on: ubuntu-latest + if: github.event_name == 'workflow_run' + outputs: + should_continue: ${{ steps.check.outputs.should_continue }} + steps: + - name: Check CI status + id: check + run: | + if [[ "${{ github.event.workflow_run.conclusion }}" == "success" ]]; then + echo "should_continue=true" >> $GITHUB_OUTPUT + echo "CI Check passed, proceeding with build" + else + echo "should_continue=false" >> $GITHUB_OUTPUT + echo "CI Check did not succeed (status: ${{ github.event.workflow_run.conclusion 
}}), skipping build" + fi + build: + needs: [wait-for-ci] + # 运行条件:workflow_run 触发且 CI 成功,或者其他触发方式 + if: | + always() && + (needs.wait-for-ci.result == 'skipped' || needs.wait-for-ci.outputs.should_continue == 'true') strategy: fail-fast: false matrix: @@ -40,8 +77,10 @@ jobs: shell: bash -l {0} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: + # 如果是 workflow_run 触发,使用触发 CI Check 的 commit + ref: ${{ github.event.workflow_run.head_sha || github.ref }} fetch-depth: 0 - name: Check if platform should be built @@ -65,7 +104,6 @@ jobs: channels: conda-forge,robostack-staging,uni-lab,defaults channel-priority: strict activate-environment: build-env - auto-activate-base: false auto-update-conda: false show-channel-urls: true @@ -81,12 +119,61 @@ jobs: conda list | grep -E "(rattler-build|anaconda-client)" echo "Platform: ${{ matrix.platform }}" echo "OS: ${{ matrix.os }}" - echo "Building UniLabOS package" + echo "Build full package: ${{ github.event.inputs.build_full || 'false' }}" + echo "Building packages:" + echo " - unilabos-env (environment dependencies)" + echo " - unilabos (with pip package)" + if [[ "${{ github.event.inputs.build_full }}" == "true" ]]; then + echo " - unilabos-full (complete package)" + fi - - name: Build conda package + - name: Build unilabos-env (conda environment only, noarch) if: steps.should_build.outputs.should_build == 'true' run: | - rattler-build build -r .conda/recipe.yaml -c uni-lab -c robostack-staging -c conda-forge + echo "Building unilabos-env (conda environment dependencies)..." + rattler-build build -r .conda/environment/recipe.yaml -c uni-lab -c robostack-staging -c conda-forge + + - name: Upload unilabos-env to Anaconda.org (if enabled) + if: steps.should_build.outputs.should_build == 'true' && github.event.inputs.upload_to_anaconda == 'true' + run: | + echo "Uploading unilabos-env to uni-lab organization..." + for package in $(find ./output -name "unilabos-env*.conda"); do + anaconda -t ${{ secrets.ANACONDA_API_TOKEN }} upload --user uni-lab --force "$package" + done + + - name: Build unilabos (with pip package) + if: steps.should_build.outputs.should_build == 'true' + run: | + echo "Building unilabos package..." + # 如果已上传到 Anaconda,从 uni-lab channel 获取 unilabos-env;否则从本地 output 获取 + rattler-build build -r .conda/base/recipe.yaml -c uni-lab -c robostack-staging -c conda-forge --channel ./output + + - name: Upload unilabos to Anaconda.org (if enabled) + if: steps.should_build.outputs.should_build == 'true' && github.event.inputs.upload_to_anaconda == 'true' + run: | + echo "Uploading unilabos to uni-lab organization..." + for package in $(find ./output -name "unilabos-0*.conda" -o -name "unilabos-[0-9]*.conda"); do + anaconda -t ${{ secrets.ANACONDA_API_TOKEN }} upload --user uni-lab --force "$package" + done + + - name: Build unilabos-full - Only when explicitly requested + if: | + steps.should_build.outputs.should_build == 'true' && + github.event.inputs.build_full == 'true' + run: | + echo "Building unilabos-full package on ${{ matrix.platform }}..." + rattler-build build -r .conda/full/recipe.yaml -c uni-lab -c robostack-staging -c conda-forge --channel ./output + + - name: Upload unilabos-full to Anaconda.org (if enabled) + if: | + steps.should_build.outputs.should_build == 'true' && + github.event.inputs.build_full == 'true' && + github.event.inputs.upload_to_anaconda == 'true' + run: | + echo "Uploading unilabos-full to uni-lab organization..." 
+ for package in $(find ./output -name "unilabos-full*.conda"); do + anaconda -t ${{ secrets.ANACONDA_API_TOKEN }} upload --user uni-lab --force "$package" + done - name: List built packages if: steps.should_build.outputs.should_build == 'true' @@ -108,17 +195,9 @@ jobs: - name: Upload conda package artifacts if: steps.should_build.outputs.should_build == 'true' - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v6 with: name: conda-package-unilabos-${{ matrix.platform }} path: conda-packages-temp if-no-files-found: warn retention-days: 30 - - - name: Upload to Anaconda.org (uni-lab organization) - if: github.event.inputs.upload_to_anaconda == 'true' - run: | - for package in $(find ./output -name "*.conda"); do - echo "Uploading $package to uni-lab organization..." - anaconda -t ${{ secrets.ANACONDA_API_TOKEN }} upload --user uni-lab --force "$package" - done diff --git a/.gitignore b/.gitignore index 610be61..838331e 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ temp/ output/ unilabos_data/ pyrightconfig.json +.cursorignore ## Python # Byte-compiled / optimized / DLL files diff --git a/MANIFEST.in b/MANIFEST.in index d81945e..156ca52 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,5 @@ recursive-include unilabos/test * +recursive-include unilabos/utils * recursive-include unilabos/registry *.yaml recursive-include unilabos/app/web/static * recursive-include unilabos/app/web/templates * diff --git a/README.md b/README.md index f10cc0f..fa0d9dd 100644 --- a/README.md +++ b/README.md @@ -31,26 +31,46 @@ Detailed documentation can be found at: ## Quick Start -1. Setup Conda Environment +### 1. Setup Conda Environment -Uni-Lab-OS recommends using `mamba` for environment management: +Uni-Lab-OS recommends using `mamba` for environment management. Choose the package that fits your needs: + +| Package | Use Case | Contents | +|---------|----------|----------| +| `unilabos` | **Recommended for most users** | Complete package, ready to use | +| `unilabos-env` | Developers (editable install) | Environment only, install unilabos via pip | +| `unilabos-full` | Simulation/Visualization | unilabos + ROS2 Desktop + Gazebo + MoveIt | ```bash # Create new environment -mamba create -n unilab python=3.11.11 +mamba create -n unilab python=3.11.14 mamba activate unilab -mamba install -n unilab uni-lab::unilabos -c robostack-staging -c conda-forge + +# Option A: Standard installation (recommended for most users) +mamba install uni-lab::unilabos -c robostack-staging -c conda-forge + +# Option B: For developers (editable mode development) +mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge +# Then install unilabos and dependencies: +git clone https://github.com/deepmodeling/Uni-Lab-OS.git && cd Uni-Lab-OS +pip install -e . +uv pip install -r unilabos/utils/requirements.txt + +# Option C: Full installation (simulation/visualization) +mamba install uni-lab::unilabos-full -c robostack-staging -c conda-forge ``` -2. Install Dev Uni-Lab-OS +**When to use which?** +- **unilabos**: Standard installation for production deployment and general usage (recommended) +- **unilabos-env**: For developers who need `pip install -e .` editable mode, modify source code +- **unilabos-full**: For simulation (Gazebo), visualization (rviz2), and Jupyter notebooks + +### 2. 
Clone Repository (Optional, for developers) ```bash -# Clone the repository +# Clone the repository (only needed for development or examples) git clone https://github.com/deepmodeling/Uni-Lab-OS.git cd Uni-Lab-OS - -# Install Uni-Lab-OS -pip install . ``` 3. Start Uni-Lab System diff --git a/README_zh.md b/README_zh.md index c4dba7d..20b8f53 100644 --- a/README_zh.md +++ b/README_zh.md @@ -31,26 +31,46 @@ Uni-Lab-OS 是一个用于实验室自动化的综合平台,旨在连接和控 ## 快速开始 -1. 配置 Conda 环境 +### 1. 配置 Conda 环境 -Uni-Lab-OS 建议使用 `mamba` 管理环境。根据您的操作系统选择适当的环境文件: +Uni-Lab-OS 建议使用 `mamba` 管理环境。根据您的需求选择合适的安装包: + +| 安装包 | 适用场景 | 包含内容 | +|--------|----------|----------| +| `unilabos` | **推荐大多数用户** | 完整安装包,开箱即用 | +| `unilabos-env` | 开发者(可编辑安装) | 仅环境依赖,通过 pip 安装 unilabos | +| `unilabos-full` | 仿真/可视化 | unilabos + ROS2 桌面版 + Gazebo + MoveIt | ```bash # 创建新环境 -mamba create -n unilab python=3.11.11 +mamba create -n unilab python=3.11.14 mamba activate unilab -mamba install -n unilab uni-lab::unilabos -c robostack-staging -c conda-forge + +# 方案 A:标准安装(推荐大多数用户) +mamba install uni-lab::unilabos -c robostack-staging -c conda-forge + +# 方案 B:开发者环境(可编辑模式开发) +mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge +# 然后安装 unilabos 和依赖: +git clone https://github.com/deepmodeling/Uni-Lab-OS.git && cd Uni-Lab-OS +pip install -e . +uv pip install -r unilabos/utils/requirements.txt + +# 方案 C:完整安装(仿真/可视化) +mamba install uni-lab::unilabos-full -c robostack-staging -c conda-forge ``` -2. 安装开发版 Uni-Lab-OS: +**如何选择?** +- **unilabos**:标准安装,适用于生产部署和日常使用(推荐) +- **unilabos-env**:开发者使用,支持 `pip install -e .` 可编辑模式,可修改源代码 +- **unilabos-full**:需要仿真(Gazebo)、可视化(rviz2)或 Jupyter Notebook + +### 2. 克隆仓库(可选,供开发者使用) ```bash -# 克隆仓库 +# 克隆仓库(仅开发或查看示例时需要) git clone https://github.com/deepmodeling/Uni-Lab-OS.git cd Uni-Lab-OS - -# 安装 Uni-Lab-OS -pip install . ``` 3. 启动 Uni-Lab 系统 diff --git a/docs/user_guide/best_practice.md b/docs/user_guide/best_practice.md index e1ffc24..767dc4d 100644 --- a/docs/user_guide/best_practice.md +++ b/docs/user_guide/best_practice.md @@ -31,6 +31,14 @@ 详细的安装步骤请参考 [安装指南](installation.md)。 +**选择合适的安装包:** + +| 安装包 | 适用场景 | 包含组件 | +|--------|----------|----------| +| `unilabos` | **推荐大多数用户**,生产部署 | 完整安装包,开箱即用 | +| `unilabos-env` | 开发者(可编辑安装) | 仅环境依赖,通过 pip 安装 unilabos | +| `unilabos-full` | 仿真/可视化 | unilabos + 完整 ROS2 桌面版 + Gazebo + MoveIt | + **关键步骤:** ```bash @@ -38,15 +46,30 @@ # 下载 Miniforge: https://github.com/conda-forge/miniforge/releases # 2. 创建 Conda 环境 -mamba create -n unilab python=3.11.11 +mamba create -n unilab python=3.11.14 # 3. 激活环境 mamba activate unilab -# 4. 安装 Uni-Lab-OS +# 4. 安装 Uni-Lab-OS(选择其一) + +# 方案 A:标准安装(推荐大多数用户) mamba install uni-lab::unilabos -c robostack-staging -c conda-forge + +# 方案 B:开发者环境(可编辑模式开发) +mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge +pip install -e /path/to/Uni-Lab-OS # 可编辑安装 +uv pip install -r unilabos/utils/requirements.txt # 安装 pip 依赖 + +# 方案 C:完整版(仿真/可视化) +mamba install uni-lab::unilabos-full -c robostack-staging -c conda-forge ``` +**选择建议:** +- **日常使用/生产部署**:使用 `unilabos`(推荐),完整功能,开箱即用 +- **开发者**:使用 `unilabos-env` + `pip install -e .` + `uv pip install -r unilabos/utils/requirements.txt`,代码修改立即生效 +- **仿真/可视化**:使用 `unilabos-full`,含 Gazebo、rviz2、MoveIt + #### 1.2 验证安装 ```bash @@ -416,6 +439,9 @@ unilab --ak your_ak --sk your_sk -g test/experiments/mock_devices/mock_all.json 1. 访问 Web 界面,进入"仪器耗材"模块 2. 在"仪器设备"区域找到并添加上述设备 3. 在"物料耗材"区域找到并添加容器 +4. 
在workstation中配置protocol_type包含PumpTransferProtocol + +![添加Protocol类型](image/add_protocol.png) ![物料列表](image/material.png) @@ -768,7 +794,43 @@ Waiting for host service... 详细的设备驱动编写指南请参考 [添加设备驱动](../developer_guide/add_device.md)。 -#### 9.1 为什么需要自定义设备? +#### 9.1 开发环境准备 + +**推荐使用 `unilabos-env` + `pip install -e .` + `uv pip install`** 进行设备开发: + +```bash +# 1. 创建环境并安装 unilabos-env(ROS2 + conda 依赖 + uv) +mamba create -n unilab python=3.11.14 +conda activate unilab +mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge + +# 2. 克隆代码 +git clone https://github.com/deepmodeling/Uni-Lab-OS.git +cd Uni-Lab-OS + +# 3. 以可编辑模式安装(推荐使用脚本,自动检测中文环境) +python scripts/dev_install.py + +# 或手动安装: +pip install -e . +uv pip install -r unilabos/utils/requirements.txt +``` + +**为什么使用这种方式?** +- `unilabos-env` 提供 ROS2 核心组件和 uv(通过 conda 安装,避免编译) +- `unilabos/utils/requirements.txt` 包含所有运行时需要的 pip 依赖 +- `dev_install.py` 自动检测中文环境,中文系统自动使用清华镜像 +- 使用 `uv` 替代 `pip`,安装速度更快 +- 可编辑模式:代码修改**立即生效**,无需重新安装 + +**如果安装失败或速度太慢**,可以手动执行(使用清华镜像): + +```bash +pip install -e . -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple +uv pip install -r unilabos/utils/requirements.txt -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple +``` + +#### 9.2 为什么需要自定义设备? Uni-Lab-OS 内置了常见设备,但您的实验室可能有特殊设备需要集成: @@ -777,7 +839,7 @@ Uni-Lab-OS 内置了常见设备,但您的实验室可能有特殊设备需要 - 特殊的实验流程 - 第三方设备集成 -#### 9.2 创建 Python 包 +#### 9.3 创建 Python 包 为了方便开发和管理,建议为您的实验室创建独立的 Python 包。 @@ -814,7 +876,7 @@ touch my_lab_devices/my_lab_devices/__init__.py touch my_lab_devices/my_lab_devices/devices/__init__.py ``` -#### 9.3 创建 setup.py +#### 9.4 创建 setup.py ```python # my_lab_devices/setup.py @@ -845,7 +907,7 @@ setup( ) ``` -#### 9.4 开发安装 +#### 9.5 开发安装 使用 `-e` 参数进行可编辑安装,这样代码修改后立即生效: @@ -860,7 +922,7 @@ pip install -e . 
-i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple - 方便调试和测试 - 支持版本控制(git) -#### 9.5 编写设备驱动 +#### 9.6 编写设备驱动 创建设备驱动文件: @@ -1001,7 +1063,7 @@ class MyPump: - **返回 Dict**:所有动作方法返回字典类型 - **文档字符串**:详细说明参数和功能 -#### 9.6 测试设备驱动 +#### 9.7 测试设备驱动 创建简单的测试脚本: diff --git a/docs/user_guide/image/add_protocol.png b/docs/user_guide/image/add_protocol.png new file mode 100644 index 0000000..ce3b381 Binary files /dev/null and b/docs/user_guide/image/add_protocol.png differ diff --git a/docs/user_guide/installation.md b/docs/user_guide/installation.md index 3f94f2f..acf8fb6 100644 --- a/docs/user_guide/installation.md +++ b/docs/user_guide/installation.md @@ -13,15 +13,26 @@ - 开发者需要 Git 和基本的 Python 开发知识 - 自定义 msgs 需要 GitHub 账号 +## 安装包选择 + +Uni-Lab-OS 提供三个安装包版本,根据您的需求选择: + +| 安装包 | 适用场景 | 包含组件 | 磁盘占用 | +|--------|----------|----------|----------| +| **unilabos** | **推荐大多数用户**,生产部署 | 完整安装包,开箱即用 | ~2-3 GB | +| **unilabos-env** | 开发者环境(可编辑安装) | 仅环境依赖,通过 pip 安装 unilabos | ~2 GB | +| **unilabos-full** | 仿真可视化、完整功能体验 | unilabos + 完整 ROS2 桌面版 + Gazebo + MoveIt | ~8-10 GB | + ## 安装方式选择 根据您的使用场景,选择合适的安装方式: -| 安装方式 | 适用人群 | 特点 | 安装时间 | -| ---------------------- | -------------------- | ------------------------------ | ---------------------------- | -| **方式一:一键安装** | 实验室用户、快速体验 | 预打包环境,离线可用,无需配置 | 5-10 分钟 (网络良好的情况下) | -| **方式二:手动安装** | 标准用户、生产环境 | 灵活配置,版本可控 | 10-20 分钟 | -| **方式三:开发者安装** | 开发者、需要修改源码 | 可编辑模式,支持自定义 msgs | 20-30 分钟 | +| 安装方式 | 适用人群 | 推荐安装包 | 特点 | 安装时间 | +| ---------------------- | -------------------- | ----------------- | ------------------------------ | ---------------------------- | +| **方式一:一键安装** | 快速体验、演示 | 预打包环境 | 离线可用,无需配置 | 5-10 分钟 (网络良好的情况下) | +| **方式二:手动安装** | **大多数用户** | `unilabos` | 完整功能,开箱即用 | 10-20 分钟 | +| **方式三:开发者安装** | 开发者、需要修改源码 | `unilabos-env` | 可编辑模式,支持自定义开发 | 20-30 分钟 | +| **仿真/可视化** | 仿真测试、可视化调试 | `unilabos-full` | 含 Gazebo、rviz2、MoveIt | 30-60 分钟 | --- @@ -144,17 +155,38 @@ bash Miniforge3-$(uname)-$(uname -m).sh 使用以下命令创建 Uni-Lab 专用环境: ```bash -mamba create -n unilab python=3.11.11 # 目前ros2组件依赖版本大多为3.11.11 +mamba create -n unilab python=3.11.14 # 目前ros2组件依赖版本大多为3.11.14 mamba activate unilab -mamba install -n unilab uni-lab::unilabos -c robostack-staging -c conda-forge + +# 选择安装包(三选一): + +# 方案 A:标准安装(推荐大多数用户) +mamba install uni-lab::unilabos -c robostack-staging -c conda-forge + +# 方案 B:开发者环境(可编辑模式开发) +mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge +# 然后安装 unilabos 和 pip 依赖: +git clone https://github.com/deepmodeling/Uni-Lab-OS.git && cd Uni-Lab-OS +pip install -e . 
+uv pip install -r unilabos/utils/requirements.txt + +# 方案 C:完整版(含仿真和可视化工具) +mamba install uni-lab::unilabos-full -c robostack-staging -c conda-forge ``` **参数说明**: - `-n unilab`: 创建名为 "unilab" 的环境 -- `uni-lab::unilabos`: 从 uni-lab channel 安装 unilabos 包 +- `uni-lab::unilabos`: 安装 unilabos 完整包,开箱即用(推荐) +- `uni-lab::unilabos-env`: 仅安装环境依赖,适合开发者使用 `pip install -e .` +- `uni-lab::unilabos-full`: 安装完整包(含 ROS2 Desktop、Gazebo、MoveIt 等) - `-c robostack-staging -c conda-forge`: 添加额外的软件源 +**包选择建议**: +- **日常使用/生产部署**:安装 `unilabos`(推荐,完整功能,开箱即用) +- **开发者**:安装 `unilabos-env`,然后使用 `uv pip install -r unilabos/utils/requirements.txt` 安装依赖,再 `pip install -e .` 进行可编辑安装 +- **仿真/可视化**:安装 `unilabos-full`(Gazebo、rviz2、MoveIt) + **如果遇到网络问题**,可以使用清华镜像源加速下载: ```bash @@ -163,8 +195,14 @@ mamba config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/m mamba config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/ mamba config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge/ -# 然后重新执行安装命令 +# 然后重新执行安装命令(推荐标准安装) mamba create -n unilab uni-lab::unilabos -c robostack-staging + +# 或完整版(仿真/可视化) +mamba create -n unilab uni-lab::unilabos-full -c robostack-staging + +# pip 安装时使用清华镜像(开发者安装时使用) +uv pip install -r unilabos/utils/requirements.txt -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple ``` ### 第三步:激活环境 @@ -203,58 +241,87 @@ cd Uni-Lab-OS cd Uni-Lab-OS ``` -### 第二步:安装基础环境 +### 第二步:安装开发环境(unilabos-env) -**推荐方式**:先通过**方式一(一键安装)**或**方式二(手动安装)**完成基础环境的安装,这将包含所有必需的依赖项(ROS2、msgs 等)。 - -#### 选项 A:通过一键安装(推荐) - -参考上文"方式一:一键安装",完成基础环境的安装后,激活环境: +**重要**:开发者请使用 `unilabos-env` 包,它专为开发者设计: +- 包含 ROS2 核心组件和消息包(ros-humble-ros-core、std-msgs、geometry-msgs 等) +- 包含 transforms3d、cv-bridge、tf2 等 conda 依赖 +- 包含 `uv` 工具,用于快速安装 pip 依赖 +- **不包含** pip 依赖和 unilabos 包(由 `pip install -e .` 和 `uv pip install` 安装) ```bash +# 创建并激活环境 +mamba create -n unilab python=3.11.14 conda activate unilab + +# 安装开发者环境包(ROS2 + conda 依赖 + uv) +mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge ``` -#### 选项 B:通过手动安装 +### 第三步:安装 pip 依赖和可编辑模式安装 -参考上文"方式二:手动安装",创建并安装环境: - -```bash -mamba create -n unilab python=3.11.11 -conda activate unilab -mamba install -n unilab uni-lab::unilabos -c robostack-staging -c conda-forge -``` - -**说明**:这会安装包括 Python 3.11.11、ROS2 Humble、ros-humble-unilabos-msgs 和所有必需依赖 - -### 第三步:切换到开发版本 - -现在你已经有了一个完整可用的 Uni-Lab 环境,接下来将 unilabos 包切换为开发版本: +克隆代码并安装依赖: ```bash # 确保环境已激活 conda activate unilab -# 卸载 pip 安装的 unilabos(保留所有 conda 依赖) -pip uninstall unilabos -y - -# 克隆 dev 分支(如果还未克隆) -cd /path/to/your/workspace -git clone -b dev https://github.com/deepmodeling/Uni-Lab-OS.git -# 或者如果已经克隆,切换到 dev 分支 +# 克隆仓库(如果还未克隆) +git clone https://github.com/deepmodeling/Uni-Lab-OS.git cd Uni-Lab-OS + +# 切换到 dev 分支(可选) git checkout dev git pull - -# 以可编辑模式安装开发版 unilabos -pip install -e . -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple ``` -**参数说明**: +**推荐:使用安装脚本**(自动检测中文环境,使用 uv 加速): -- `-e`: editable mode(可编辑模式),代码修改立即生效,无需重新安装 -- `-i`: 使用清华镜像源加速下载 -- `pip uninstall unilabos`: 只卸载 pip 安装的 unilabos 包,不影响 conda 安装的其他依赖(如 ROS2、msgs 等) +```bash +# 自动检测中文环境,如果是中文系统则使用清华镜像 +python scripts/dev_install.py + +# 或者手动指定: +python scripts/dev_install.py --china # 强制使用清华镜像 +python scripts/dev_install.py --no-mirror # 强制使用 PyPI +python scripts/dev_install.py --skip-deps # 跳过 pip 依赖安装 +python scripts/dev_install.py --use-pip # 使用 pip 而非 uv +``` + +**手动安装**(如果脚本安装失败或速度太慢): + +```bash +# 1. 安装 unilabos(可编辑模式) +pip install -e . + +# 2. 
使用 uv 安装 pip 依赖(推荐,速度更快) +uv pip install -r unilabos/utils/requirements.txt + +# 国内用户使用清华镜像: +pip install -e . -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple +uv pip install -r unilabos/utils/requirements.txt -i https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple +``` + +**注意**: +- `uv` 已包含在 `unilabos-env` 中,无需单独安装 +- `unilabos/utils/requirements.txt` 包含运行 unilabos 所需的所有 pip 依赖 +- 部分特殊包(如 pylabrobot)会在运行时由 unilabos 自动检测并安装 + +**为什么使用可编辑模式?** + +- `-e` (editable mode):代码修改**立即生效**,无需重新安装 +- 适合开发调试:修改代码后直接运行测试 +- 与 `unilabos-env` 配合:环境依赖由 conda 管理,unilabos 代码由 pip 管理 + +**验证安装**: + +```bash +# 检查 unilabos 版本 +python -c "import unilabos; print(unilabos.__version__)" + +# 检查安装位置(应该指向你的代码目录) +pip show unilabos | grep Location +``` ### 第四步:安装或自定义 ros-humble-unilabos-msgs(可选) @@ -464,7 +531,45 @@ cd $CONDA_PREFIX/envs/unilab ### 问题 8: 环境很大,有办法减小吗? -**解决方案**: 预打包的环境包含所有依赖,通常较大(压缩后 2-5GB)。这是为了确保离线安装和完整功能。如果空间有限,考虑使用方式二手动安装,只安装需要的组件。 +**解决方案**: + +1. **使用 `unilabos` 标准版**(推荐大多数用户): + ```bash + mamba install uni-lab::unilabos -c robostack-staging -c conda-forge + ``` + 标准版包含完整功能,环境大小约 2-3GB(相比完整版的 8-10GB)。 + +2. **使用 `unilabos-env` 开发者版**(最小化): + ```bash + mamba install uni-lab::unilabos-env -c robostack-staging -c conda-forge + # 然后手动安装依赖 + pip install -e . + uv pip install -r unilabos/utils/requirements.txt + ``` + 开发者版只包含环境依赖,体积最小约 2GB。 + +3. **按需安装额外组件**: + 如果后续需要特定功能,可以单独安装: + ```bash + # 需要 Jupyter + mamba install jupyter jupyros + + # 需要可视化 + mamba install matplotlib opencv + + # 需要仿真(注意:这会安装大量依赖) + mamba install ros-humble-gazebo-ros + ``` + +4. **预打包环境问题**: + 预打包环境(方式一)包含所有依赖,通常较大(压缩后 2-5GB)。这是为了确保离线安装和完整功能。 + +**包选择建议**: +| 需求 | 推荐包 | 预估大小 | +|------|--------|----------| +| 日常使用/生产部署 | `unilabos` | ~2-3 GB | +| 开发调试(可编辑模式) | `unilabos-env` | ~2 GB | +| 仿真/可视化 | `unilabos-full` | ~8-10 GB | ### 问题 9: 如何更新到最新版本? 
@@ -511,6 +616,7 @@ mamba update ros-humble-unilabos-msgs -c uni-lab -c robostack-staging -c conda-f **提示**: -- 生产环境推荐使用方式二(手动安装)的稳定版本 -- 开发和测试推荐使用方式三(开发者安装) -- 快速体验和演示推荐使用方式一(一键安装) +- **大多数用户**推荐使用方式二(手动安装)的 `unilabos` 标准版 +- **开发者**推荐使用方式三(开发者安装),安装 `unilabos-env` 后使用 `uv pip install -r unilabos/utils/requirements.txt` 安装依赖 +- **仿真/可视化**推荐安装 `unilabos-full` 完整版 +- **快速体验和演示**推荐使用方式一(一键安装) diff --git a/recipes/msgs/recipe.yaml b/recipes/msgs/recipe.yaml index 6d32908..f78df2e 100644 --- a/recipes/msgs/recipe.yaml +++ b/recipes/msgs/recipe.yaml @@ -1,6 +1,6 @@ package: name: ros-humble-unilabos-msgs - version: 0.10.15 + version: 0.10.17 source: path: ../../unilabos_msgs target_directory: src @@ -25,7 +25,7 @@ requirements: build: - ${{ compiler('cxx') }} - ${{ compiler('c') }} - - python ==3.11.11 + - python ==3.11.14 - numpy - if: build_platform != target_platform then: @@ -63,14 +63,14 @@ requirements: - robostack-staging::ros-humble-rosidl-default-generators - robostack-staging::ros-humble-std-msgs - robostack-staging::ros-humble-geometry-msgs - - robostack-staging::ros2-distro-mutex=0.6 + - robostack-staging::ros2-distro-mutex=0.7 run: - robostack-staging::ros-humble-action-msgs - robostack-staging::ros-humble-ros-workspace - robostack-staging::ros-humble-rosidl-default-runtime - robostack-staging::ros-humble-std-msgs - robostack-staging::ros-humble-geometry-msgs - - robostack-staging::ros2-distro-mutex=0.6 + - robostack-staging::ros2-distro-mutex=0.7 - if: osx and x86_64 then: - __osx >=${{ MACOSX_DEPLOYMENT_TARGET|default('10.14') }} diff --git a/recipes/unilabos/recipe.yaml b/recipes/unilabos/recipe.yaml index be3f1a1..feca503 100644 --- a/recipes/unilabos/recipe.yaml +++ b/recipes/unilabos/recipe.yaml @@ -1,6 +1,6 @@ package: name: unilabos - version: "0.10.15" + version: "0.10.17" source: path: ../.. diff --git a/scripts/create_readme.py b/scripts/create_readme.py index c4f3933..e87c1d8 100644 --- a/scripts/create_readme.py +++ b/scripts/create_readme.py @@ -85,7 +85,7 @@ Verification: ------------- The verify_installation.py script will check: - - Python version (3.11.11) + - Python version (3.11.14) - ROS2 rclpy installation - UniLabOS installation and dependencies @@ -104,7 +104,7 @@ Build Information: Branch: {branch} Platform: {platform} - Python: 3.11.11 + Python: 3.11.14 Date: {build_date} Troubleshooting: diff --git a/scripts/dev_install.py b/scripts/dev_install.py new file mode 100644 index 0000000..002db24 --- /dev/null +++ b/scripts/dev_install.py @@ -0,0 +1,214 @@ +#!/usr/bin/env python3 +""" +Development installation script for UniLabOS. +Auto-detects Chinese locale and uses appropriate mirror. + +Usage: + python scripts/dev_install.py + python scripts/dev_install.py --no-mirror # Force no mirror + python scripts/dev_install.py --china # Force China mirror + python scripts/dev_install.py --skip-deps # Skip pip dependencies installation + +Flow: + 1. pip install -e . (install unilabos in editable mode) + 2. Detect Chinese locale + 3. Use uv to install pip dependencies from requirements.txt + 4. Special packages (like pylabrobot) are handled by environment_check.py at runtime +""" + +import locale +import subprocess +import sys +import argparse +from pathlib import Path + +# Tsinghua mirror URL +TSINGHUA_MIRROR = "https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple" + + +def is_chinese_locale() -> bool: + """ + Detect if system is in Chinese locale. 
+ Same logic as EnvironmentChecker._is_chinese_locale() + """ + try: + lang = locale.getdefaultlocale()[0] + if lang and ("zh" in lang.lower() or "chinese" in lang.lower()): + return True + except Exception: + pass + return False + + +def run_command(cmd: list, description: str, retry: int = 2) -> bool: + """Run command with retry support.""" + print(f"[INFO] {description}") + print(f"[CMD] {' '.join(cmd)}") + + for attempt in range(retry + 1): + try: + result = subprocess.run(cmd, check=True, timeout=600) + print(f"[OK] {description}") + return True + except subprocess.CalledProcessError as e: + if attempt < retry: + print(f"[WARN] Attempt {attempt + 1} failed, retrying...") + else: + print(f"[ERROR] {description} failed: {e}") + return False + except subprocess.TimeoutExpired: + print(f"[ERROR] {description} timed out") + return False + return False + + +def install_editable(project_root: Path, use_mirror: bool) -> bool: + """Install unilabos in editable mode using pip.""" + cmd = [sys.executable, "-m", "pip", "install", "-e", str(project_root)] + if use_mirror: + cmd.extend(["-i", TSINGHUA_MIRROR]) + + return run_command(cmd, "Installing unilabos in editable mode") + + +def install_requirements_uv(requirements_file: Path, use_mirror: bool) -> bool: + """Install pip dependencies using uv (installed via conda-forge::uv).""" + cmd = ["uv", "pip", "install", "-r", str(requirements_file)] + if use_mirror: + cmd.extend(["-i", TSINGHUA_MIRROR]) + + return run_command(cmd, "Installing pip dependencies with uv", retry=2) + + +def install_requirements_pip(requirements_file: Path, use_mirror: bool) -> bool: + """Fallback: Install pip dependencies using pip.""" + cmd = [sys.executable, "-m", "pip", "install", "-r", str(requirements_file)] + if use_mirror: + cmd.extend(["-i", TSINGHUA_MIRROR]) + + return run_command(cmd, "Installing pip dependencies with pip", retry=2) + + +def check_uv_available() -> bool: + """Check if uv is available (installed via conda-forge::uv).""" + try: + subprocess.run(["uv", "--version"], capture_output=True, check=True) + return True + except (subprocess.CalledProcessError, FileNotFoundError): + return False + + +def main(): + parser = argparse.ArgumentParser(description="Development installation script for UniLabOS") + parser.add_argument("--china", action="store_true", help="Force use China mirror (Tsinghua)") + parser.add_argument("--no-mirror", action="store_true", help="Force use default PyPI (no mirror)") + parser.add_argument( + "--skip-deps", action="store_true", help="Skip pip dependencies installation (only install unilabos)" + ) + parser.add_argument("--use-pip", action="store_true", help="Use pip instead of uv for dependencies") + args = parser.parse_args() + + # Determine project root + script_dir = Path(__file__).parent + project_root = script_dir.parent + requirements_file = project_root / "unilabos" / "utils" / "requirements.txt" + + if not (project_root / "setup.py").exists(): + print(f"[ERROR] setup.py not found in {project_root}") + sys.exit(1) + + print("=" * 60) + print("UniLabOS Development Installation") + print("=" * 60) + print(f"Project root: {project_root}") + print() + + # Determine mirror usage based on locale + if args.no_mirror: + use_mirror = False + print("[INFO] Mirror disabled by --no-mirror flag") + elif args.china: + use_mirror = True + print("[INFO] China mirror enabled by --china flag") + else: + use_mirror = is_chinese_locale() + if use_mirror: + print("[INFO] Chinese locale detected, using Tsinghua mirror") + else: + print("[INFO] 
Non-Chinese locale detected, using default PyPI") + + print() + + # Step 1: Install unilabos in editable mode + print("[STEP 1] Installing unilabos in editable mode...") + if not install_editable(project_root, use_mirror): + print("[ERROR] Failed to install unilabos") + print() + print("Manual fallback:") + if use_mirror: + print(f" pip install -e {project_root} -i {TSINGHUA_MIRROR}") + else: + print(f" pip install -e {project_root}") + sys.exit(1) + + print() + + # Step 2: Install pip dependencies + if args.skip_deps: + print("[INFO] Skipping pip dependencies installation (--skip-deps)") + else: + print("[STEP 2] Installing pip dependencies...") + + if not requirements_file.exists(): + print(f"[WARN] Requirements file not found: {requirements_file}") + print("[INFO] Skipping dependencies installation") + else: + # Try uv first (faster), fallback to pip + if args.use_pip: + print("[INFO] Using pip (--use-pip flag)") + success = install_requirements_pip(requirements_file, use_mirror) + elif check_uv_available(): + print("[INFO] Using uv (installed via conda-forge::uv)") + success = install_requirements_uv(requirements_file, use_mirror) + if not success: + print("[WARN] uv failed, falling back to pip...") + success = install_requirements_pip(requirements_file, use_mirror) + else: + print("[WARN] uv not available (should be installed via: mamba install conda-forge::uv)") + print("[INFO] Falling back to pip...") + success = install_requirements_pip(requirements_file, use_mirror) + + if not success: + print() + print("[WARN] Failed to install some dependencies automatically.") + print("You can manually install them:") + if use_mirror: + print(f" uv pip install -r {requirements_file} -i {TSINGHUA_MIRROR}") + print(" or:") + print(f" pip install -r {requirements_file} -i {TSINGHUA_MIRROR}") + else: + print(f" uv pip install -r {requirements_file}") + print(" or:") + print(f" pip install -r {requirements_file}") + + print() + print("=" * 60) + print("Installation complete!") + print("=" * 60) + print() + print("Note: Some special packages (like pylabrobot) are installed") + print("automatically at runtime by unilabos if needed.") + print() + print("Verify installation:") + print(' python -c "import unilabos; print(unilabos.__version__)"') + print() + print("If you encounter issues, you can manually install dependencies:") + if use_mirror: + print(f" uv pip install -r unilabos/utils/requirements.txt -i {TSINGHUA_MIRROR}") + else: + print(" uv pip install -r unilabos/utils/requirements.txt") + print() + + +if __name__ == "__main__": + main() diff --git a/setup.py b/setup.py index b6ae5ed..b3a00f1 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,7 @@ package_name = 'unilabos' setup( name=package_name, - version='0.10.15', + version='0.10.17', packages=find_packages(), include_package_data=True, install_requires=['setuptools'], diff --git a/tests/workflow/test.json b/tests/workflow/test.json new file mode 100644 index 0000000..8fc6449 --- /dev/null +++ b/tests/workflow/test.json @@ -0,0 +1,213 @@ +{ + "workflow": [ + { + "action": "transfer_liquid", + "action_args": { + "sources": "cell_lines", + "targets": "Liquid_1", + "asp_vol": 100.0, + "dis_vol": 74.75, + "asp_flow_rate": 94.0, + "dis_flow_rate": 95.5 + } + }, + { + "action": "transfer_liquid", + "action_args": { + "sources": "cell_lines", + "targets": "Liquid_2", + "asp_vol": 100.0, + "dis_vol": 74.75, + "asp_flow_rate": 94.0, + "dis_flow_rate": 95.5 + } + }, + { + "action": "transfer_liquid", + "action_args": { + "sources": "cell_lines", + 
"targets": "Liquid_3", + "asp_vol": 100.0, + "dis_vol": 74.75, + "asp_flow_rate": 94.0, + "dis_flow_rate": 95.5 + } + }, + { + "action": "transfer_liquid", + "action_args": { + "sources": "cell_lines_2", + "targets": "Liquid_4", + "asp_vol": 100.0, + "dis_vol": 74.75, + "asp_flow_rate": 94.0, + "dis_flow_rate": 95.5 + } + }, + { + "action": "transfer_liquid", + "action_args": { + "sources": "cell_lines_2", + "targets": "Liquid_5", + "asp_vol": 100.0, + "dis_vol": 74.75, + "asp_flow_rate": 94.0, + "dis_flow_rate": 95.5 + } + }, + { + "action": "transfer_liquid", + "action_args": { + "sources": "cell_lines_2", + "targets": "Liquid_6", + "asp_vol": 100.0, + "dis_vol": 74.75, + "asp_flow_rate": 94.0, + "dis_flow_rate": 95.5 + } + }, + { + "action": "transfer_liquid", + "action_args": { + "sources": "cell_lines_3", + "targets": "dest_set", + "asp_vol": 100.0, + "dis_vol": 74.75, + "asp_flow_rate": 94.0, + "dis_flow_rate": 95.5 + } + }, + { + "action": "transfer_liquid", + "action_args": { + "sources": "cell_lines_3", + "targets": "dest_set_2", + "asp_vol": 100.0, + "dis_vol": 74.75, + "asp_flow_rate": 94.0, + "dis_flow_rate": 95.5 + } + }, + { + "action": "transfer_liquid", + "action_args": { + "sources": "cell_lines_3", + "targets": "dest_set_3", + "asp_vol": 100.0, + "dis_vol": 74.75, + "asp_flow_rate": 94.0, + "dis_flow_rate": 95.5 + } + } + ], + "reagent": { + "Liquid_1": { + "slot": 1, + "well": [ + "A4", + "A7", + "A10" + ], + "labware": "rep 1" + }, + "Liquid_4": { + "slot": 1, + "well": [ + "A4", + "A7", + "A10" + ], + "labware": "rep 1" + }, + "dest_set": { + "slot": 1, + "well": [ + "A4", + "A7", + "A10" + ], + "labware": "rep 1" + }, + "Liquid_2": { + "slot": 2, + "well": [ + "A3", + "A5", + "A8" + ], + "labware": "rep 2" + }, + "Liquid_5": { + "slot": 2, + "well": [ + "A3", + "A5", + "A8" + ], + "labware": "rep 2" + }, + "dest_set_2": { + "slot": 2, + "well": [ + "A3", + "A5", + "A8" + ], + "labware": "rep 2" + }, + "Liquid_3": { + "slot": 3, + "well": [ + "A4", + "A6", + "A10" + ], + "labware": "rep 3" + }, + "Liquid_6": { + "slot": 3, + "well": [ + "A4", + "A6", + "A10" + ], + "labware": "rep 3" + }, + "dest_set_3": { + "slot": 3, + "well": [ + "A4", + "A6", + "A10" + ], + "labware": "rep 3" + }, + "cell_lines": { + "slot": 4, + "well": [ + "A1", + "A3", + "A5" + ], + "labware": "DRUG + YOYO-MEDIA" + }, + "cell_lines_2": { + "slot": 4, + "well": [ + "A1", + "A3", + "A5" + ], + "labware": "DRUG + YOYO-MEDIA" + }, + "cell_lines_3": { + "slot": 4, + "well": [ + "A1", + "A3", + "A5" + ], + "labware": "DRUG + YOYO-MEDIA" + } + } +} \ No newline at end of file diff --git a/unilabos/__init__.py b/unilabos/__init__.py index d5ac10a..50ab2b0 100644 --- a/unilabos/__init__.py +++ b/unilabos/__init__.py @@ -1 +1 @@ -__version__ = "0.10.15" +__version__ = "0.10.17" diff --git a/unilabos/__main__.py b/unilabos/__main__.py new file mode 100644 index 0000000..6483226 --- /dev/null +++ b/unilabos/__main__.py @@ -0,0 +1,6 @@ +"""Entry point for `python -m unilabos`.""" + +from unilabos.app.main import main + +if __name__ == "__main__": + main() diff --git a/unilabos/app/main.py b/unilabos/app/main.py index 8ec26c0..a6539c3 100644 --- a/unilabos/app/main.py +++ b/unilabos/app/main.py @@ -7,7 +7,6 @@ import sys import threading import time from typing import Dict, Any, List - import networkx as nx import yaml @@ -17,9 +16,9 @@ unilabos_dir = os.path.dirname(os.path.dirname(current_dir)) if unilabos_dir not in sys.path: sys.path.append(unilabos_dir) +from unilabos.app.utils import cleanup_for_restart 
from unilabos.utils.banner_print import print_status, print_unilab_banner from unilabos.config.config import load_config, BasicConfig, HTTPConfig -from unilabos.app.utils import cleanup_for_restart # Global restart flags (used by ws_client and web/server) _restart_requested: bool = False @@ -161,6 +160,12 @@ def parse_args(): default=False, help="Complete registry information", ) + parser.add_argument( + "--check_mode", + action="store_true", + default=False, + help="Run in check mode for CI: validates registry imports and ensures no file changes", + ) parser.add_argument( "--no_update_feedback", action="store_true", @@ -211,7 +216,10 @@ def main(): args_dict = vars(args) # 环境检查 - 检查并自动安装必需的包 (可选) - if not args_dict.get("skip_env_check", False): + skip_env_check = args_dict.get("skip_env_check", False) + check_mode = args_dict.get("check_mode", False) + + if not skip_env_check: from unilabos.utils.environment_check import check_environment if not check_environment(auto_install=True): @@ -222,7 +230,21 @@ def main(): # 加载配置文件,优先加载config,然后从env读取 config_path = args_dict.get("config") - if os.getcwd().endswith("unilabos_data"): + + if check_mode: + args_dict["working_dir"] = os.path.abspath(os.getcwd()) + # 当 skip_env_check 时,默认使用当前目录作为 working_dir + if skip_env_check and not args_dict.get("working_dir") and not config_path: + working_dir = os.path.abspath(os.getcwd()) + print_status(f"跳过环境检查模式:使用当前目录作为工作目录 {working_dir}", "info") + # 检查当前目录是否有 local_config.py + local_config_in_cwd = os.path.join(working_dir, "local_config.py") + if os.path.exists(local_config_in_cwd): + config_path = local_config_in_cwd + print_status(f"发现本地配置文件: {config_path}", "info") + else: + print_status(f"未指定config路径,可通过 --config 传入 local_config.py 文件路径", "info") + elif os.getcwd().endswith("unilabos_data"): working_dir = os.path.abspath(os.getcwd()) else: working_dir = os.path.abspath(os.path.join(os.getcwd(), "unilabos_data")) @@ -241,7 +263,7 @@ def main(): working_dir = os.path.dirname(config_path) elif os.path.exists(working_dir) and os.path.exists(os.path.join(working_dir, "local_config.py")): config_path = os.path.join(working_dir, "local_config.py") - elif not config_path and ( + elif not skip_env_check and not config_path and ( not os.path.exists(working_dir) or not os.path.exists(os.path.join(working_dir, "local_config.py")) ): print_status(f"未指定config路径,可通过 --config 传入 local_config.py 文件路径", "info") @@ -255,9 +277,11 @@ def main(): print_status(f"已创建 local_config.py 路径: {config_path}", "info") else: os._exit(1) - # 加载配置文件 + + # 加载配置文件 (check_mode 跳过) print_status(f"当前工作目录为 {working_dir}", "info") - load_config_from_file(config_path) + if not check_mode: + load_config_from_file(config_path) # 根据配置重新设置日志级别 from unilabos.utils.log import configure_logger, logger @@ -313,6 +337,7 @@ def main(): machine_name = "".join([c if c.isalnum() or c == "_" else "_" for c in machine_name]) BasicConfig.machine_name = machine_name BasicConfig.vis_2d_enable = args_dict["2d_vis"] + BasicConfig.check_mode = check_mode from unilabos.resources.graphio import ( read_node_link_json, @@ -331,10 +356,14 @@ def main(): # 显示启动横幅 print_unilab_banner(args_dict) - # 注册表 - lab_registry = build_registry( - args_dict["registry_path"], args_dict.get("complete_registry", False), BasicConfig.upload_registry - ) + # 注册表 - check_mode 时强制启用 complete_registry + complete_registry = args_dict.get("complete_registry", False) or check_mode + lab_registry = build_registry(args_dict["registry_path"], complete_registry, BasicConfig.upload_registry) + + # 
Check mode: complete_registry 完成后直接退出,git diff 检测由 CI workflow 执行 + if check_mode: + print_status("Check mode: complete_registry 完成,退出", "info") + os._exit(0) if BasicConfig.upload_registry: # 设备注册到服务端 - 需要 ak 和 sk diff --git a/unilabos/app/utils.py b/unilabos/app/utils.py index d10c2e0..f6114a1 100644 --- a/unilabos/app/utils.py +++ b/unilabos/app/utils.py @@ -4,8 +4,40 @@ UniLabOS 应用工具函数 提供清理、重启等工具函数 """ -import gc +import glob import os +import shutil +import sys + + +def patch_rclpy_dll_windows(): + """在 Windows + conda 环境下为 rclpy 打 DLL 加载补丁""" + if sys.platform != "win32" or not os.environ.get("CONDA_PREFIX"): + return + try: + import rclpy + + return + except ImportError as e: + if not str(e).startswith("DLL load failed"): + return + cp = os.environ["CONDA_PREFIX"] + impl = os.path.join(cp, "Lib", "site-packages", "rclpy", "impl", "implementation_singleton.py") + pyd = glob.glob(os.path.join(cp, "Lib", "site-packages", "rclpy", "_rclpy_pybind11*.pyd")) + if not os.path.exists(impl) or not pyd: + return + with open(impl, "r", encoding="utf-8") as f: + content = f.read() + lib_bin = os.path.join(cp, "Library", "bin").replace("\\", "/") + patch = f'# UniLabOS DLL Patch\nimport os,ctypes\nos.add_dll_directory("{lib_bin}") if hasattr(os,"add_dll_directory") else None\ntry: ctypes.CDLL("{pyd[0].replace(chr(92),"/")}")\nexcept: pass\n# End Patch\n' + shutil.copy2(impl, impl + ".bak") + with open(impl, "w", encoding="utf-8") as f: + f.write(patch + content) + + +patch_rclpy_dll_windows() + +import gc import threading import time diff --git a/unilabos/app/web/client.py b/unilabos/app/web/client.py index 64a9418..0ecf460 100644 --- a/unilabos/app/web/client.py +++ b/unilabos/app/web/client.py @@ -359,9 +359,7 @@ class HTTPClient: Returns: Dict: API响应数据,包含 code 和 data (uuid, name) """ - # target_lab_uuid 暂时使用默认值,后续由后端根据 ak/sk 获取 payload = { - "target_lab_uuid": "28c38bb0-63f6-4352-b0d8-b5b8eb1766d5", "name": name, "data": { "workflow_uuid": workflow_uuid, diff --git a/unilabos/app/web/controller.py b/unilabos/app/web/controller.py index 9b0f1ff..acd1f56 100644 --- a/unilabos/app/web/controller.py +++ b/unilabos/app/web/controller.py @@ -58,14 +58,14 @@ class JobResultStore: feedback=feedback or {}, timestamp=time.time(), ) - logger.debug(f"[JobResultStore] Stored result for job {job_id[:8]}, status={status}") + logger.trace(f"[JobResultStore] Stored result for job {job_id[:8]}, status={status}") def get_and_remove(self, job_id: str) -> Optional[JobResult]: """获取并删除任务结果""" with self._results_lock: result = self._results.pop(job_id, None) if result: - logger.debug(f"[JobResultStore] Retrieved and removed result for job {job_id[:8]}") + logger.trace(f"[JobResultStore] Retrieved and removed result for job {job_id[:8]}") return result def get_result(self, job_id: str) -> Optional[JobResult]: diff --git a/unilabos/app/ws_client.py b/unilabos/app/ws_client.py index 95526f0..8644353 100644 --- a/unilabos/app/ws_client.py +++ b/unilabos/app/ws_client.py @@ -23,7 +23,7 @@ from typing import Optional, Dict, Any, List from urllib.parse import urlparse from enum import Enum -from jedi.inference.gradual.typing import TypedDict +from typing_extensions import TypedDict from unilabos.app.model import JobAddReq from unilabos.ros.nodes.presets.host_node import HostNode @@ -154,7 +154,7 @@ class DeviceActionManager: job_info.set_ready_timeout(10) # 设置10秒超时 self.active_jobs[device_key] = job_info job_log = format_job_log(job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name) - 
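For context on patch_rclpy_dll_windows in unilabos/app/utils.py above: since Python 3.8, Windows no longer resolves extension-module DLL dependencies via PATH, so rclpy's pybind11 module cannot find the ROS 2 DLLs that conda installs under Library\bin. A minimal sketch of the underlying idea, for illustration only; the shipped code instead writes the fix into rclpy's implementation_singleton.py so it also applies when rclpy is imported first from somewhere else:

import os
import sys

if sys.platform == "win32" and os.environ.get("CONDA_PREFIX") and hasattr(os, "add_dll_directory"):
    # Make conda's DLL directory visible to extension modules before importing rclpy.
    os.add_dll_directory(os.path.join(os.environ["CONDA_PREFIX"], "Library", "bin"))
# import rclpy  # would now be able to load its pybind11 extension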
logger.info(f"[DeviceActionManager] Job {job_log} can start immediately for {device_key}") + logger.trace(f"[DeviceActionManager] Job {job_log} can start immediately for {device_key}") return True def start_job(self, job_id: str) -> bool: @@ -210,8 +210,9 @@ class DeviceActionManager: job_info.update_timestamp() # 从all_jobs中移除已结束的job del self.all_jobs[job_id] - job_log = format_job_log(job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name) - logger.info(f"[DeviceActionManager] Job {job_log} ended for {device_key}") + # job_log = format_job_log(job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name) + # logger.debug(f"[DeviceActionManager] Job {job_log} ended for {device_key}") + pass else: job_log = format_job_log(job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name) logger.warning(f"[DeviceActionManager] Job {job_log} was not active for {device_key}") @@ -227,7 +228,7 @@ class DeviceActionManager: next_job_log = format_job_log( next_job.job_id, next_job.task_id, next_job.device_id, next_job.action_name ) - logger.info(f"[DeviceActionManager] Next job {next_job_log} can start for {device_key}") + logger.trace(f"[DeviceActionManager] Next job {next_job_log} can start for {device_key}") return next_job return None @@ -268,7 +269,7 @@ class DeviceActionManager: # 从all_jobs中移除 del self.all_jobs[job_id] job_log = format_job_log(job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name) - logger.info(f"[DeviceActionManager] Active job {job_log} cancelled for {device_key}") + logger.trace(f"[DeviceActionManager] Active job {job_log} cancelled for {device_key}") # 启动下一个任务 if device_key in self.device_queues and self.device_queues[device_key]: @@ -281,7 +282,7 @@ class DeviceActionManager: next_job_log = format_job_log( next_job.job_id, next_job.task_id, next_job.device_id, next_job.action_name ) - logger.info(f"[DeviceActionManager] Next job {next_job_log} can start after cancel") + logger.trace(f"[DeviceActionManager] Next job {next_job_log} can start after cancel") return True # 如果是排队中的任务 @@ -295,7 +296,7 @@ class DeviceActionManager: job_log = format_job_log( job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name ) - logger.info(f"[DeviceActionManager] Queued job {job_log} cancelled for {device_key}") + logger.trace(f"[DeviceActionManager] Queued job {job_log} cancelled for {device_key}") return True job_log = format_job_log(job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name) @@ -494,8 +495,12 @@ class MessageProcessor: await self._process_message(message_type, message_data) else: if message_type.endswith("_material"): - logger.trace(f"[MessageProcessor] 收到一条归属 {data.get('edge_session')} 的旧消息:{data}") - logger.debug(f"[MessageProcessor] 跳过了一条归属 {data.get('edge_session')} 的旧消息: {data.get('action')}") + logger.trace( + f"[MessageProcessor] 收到一条归属 {data.get('edge_session')} 的旧消息:{data}" + ) + logger.debug( + f"[MessageProcessor] 跳过了一条归属 {data.get('edge_session')} 的旧消息: {data.get('action')}" + ) else: await self._process_message(message_type, message_data) except json.JSONDecodeError: @@ -565,7 +570,7 @@ class MessageProcessor: async def _process_message(self, message_type: str, message_data: Dict[str, Any]): """处理收到的消息""" - logger.debug(f"[MessageProcessor] Processing message: {message_type}") + logger.trace(f"[MessageProcessor] Processing message: {message_type}") try: if message_type == "pong": @@ -637,13 +642,13 @@ class MessageProcessor: await 
self._send_action_state_response( device_id, action_name, task_id, job_id, "query_action_status", True, 0 ) - logger.info(f"[MessageProcessor] Job {job_log} can start immediately") + logger.trace(f"[MessageProcessor] Job {job_log} can start immediately") else: # 需要排队 await self._send_action_state_response( device_id, action_name, task_id, job_id, "query_action_status", False, 10 ) - logger.info(f"[MessageProcessor] Job {job_log} queued") + logger.trace(f"[MessageProcessor] Job {job_log} queued") # 通知QueueProcessor有新的队列更新 if self.queue_processor: @@ -847,9 +852,7 @@ class MessageProcessor: device_action_groups[key_add] = [] device_action_groups[key_add].append(item["uuid"]) - logger.info( - f"[资源同步] 跨站Transfer: {item['uuid'][:8]} from {device_old_id} to {device_id}" - ) + logger.info(f"[资源同步] 跨站Transfer: {item['uuid'][:8]} from {device_old_id} to {device_id}") else: # 正常update key = (device_id, "update") @@ -863,7 +866,9 @@ class MessageProcessor: device_action_groups[key] = [] device_action_groups[key].append(item["uuid"]) - logger.trace(f"[资源同步] 动作 {action} 分组数量: {len(device_action_groups)}, 总数量: {len(resource_uuid_list)}") + logger.trace( + f"[资源同步] 动作 {action} 分组数量: {len(device_action_groups)}, 总数量: {len(resource_uuid_list)}" + ) # 为每个(device_id, action)创建独立的更新线程 for (device_id, actual_action), items in device_action_groups.items(): @@ -911,13 +916,13 @@ class MessageProcessor: # 发送确认消息 if self.websocket_client: - await self.websocket_client.send_message({ - "action": "restart_acknowledged", - "data": {"reason": reason, "delay": delay} - }) + await self.websocket_client.send_message( + {"action": "restart_acknowledged", "data": {"reason": reason, "delay": delay}} + ) # 设置全局重启标志 import unilabos.app.main as main_module + main_module._restart_requested = True main_module._restart_reason = reason @@ -927,10 +932,12 @@ class MessageProcessor: # 在新线程中执行清理,避免阻塞当前事件循环 def do_cleanup(): import time + time.sleep(0.5) # 给当前消息处理完成的时间 logger.info(f"[MessageProcessor] Starting cleanup for restart, reason: {reason}") try: from unilabos.app.utils import cleanup_for_restart + if cleanup_for_restart(): logger.info("[MessageProcessor] Cleanup successful, main() will restart") else: @@ -1128,7 +1135,7 @@ class QueueProcessor: success = self.message_processor.send_message(message) job_log = format_job_log(job_info.job_id, job_info.task_id, job_info.device_id, job_info.action_name) if success: - logger.debug(f"[QueueProcessor] Sent busy/need_more for queued job {job_log}") + logger.trace(f"[QueueProcessor] Sent busy/need_more for queued job {job_log}") else: logger.warning(f"[QueueProcessor] Failed to send busy status for job {job_log}") @@ -1151,7 +1158,7 @@ class QueueProcessor: job_info.action_name, ) - logger.info(f"[QueueProcessor] Job {job_log} completed with status: {status}") + logger.trace(f"[QueueProcessor] Job {job_log} completed with status: {status}") # 结束任务,获取下一个可执行的任务 next_job = self.device_manager.end_job(job_id) @@ -1171,8 +1178,8 @@ class QueueProcessor: }, } self.message_processor.send_message(message) - next_job_log = format_job_log(next_job.job_id, next_job.task_id, next_job.device_id, next_job.action_name) - logger.info(f"[QueueProcessor] Notified next job {next_job_log} can start") + # next_job_log = format_job_log(next_job.job_id, next_job.task_id, next_job.device_id, next_job.action_name) + # logger.debug(f"[QueueProcessor] Notified next job {next_job_log} can start") # 立即触发下一轮状态检查 self.notify_queue_update() @@ -1314,7 +1321,7 @@ class WebSocketClient(BaseCommunicationClient): except 
(KeyError, AttributeError): logger.warning(f"[WebSocketClient] Failed to remove job {item.job_id} from HostNode status") - logger.info(f"[WebSocketClient] Intercepting final status for job_id: {item.job_id} - {status}") + # logger.debug(f"[WebSocketClient] Intercepting final status for job_id: {item.job_id} - {status}") # 通知队列处理器job完成(包括timeout的job) self.queue_processor.handle_job_completed(item.job_id, status) @@ -1381,7 +1388,9 @@ class WebSocketClient(BaseCommunicationClient): if host_node: # 获取设备信息 for device_id, namespace in host_node.devices_names.items(): - device_key = f"{namespace}/{device_id}" if namespace.startswith("/") else f"/{namespace}/{device_id}" + device_key = ( + f"{namespace}/{device_id}" if namespace.startswith("/") else f"/{namespace}/{device_id}" + ) is_online = device_key in host_node._online_devices # 获取设备的动作信息 @@ -1395,14 +1404,16 @@ class WebSocketClient(BaseCommunicationClient): "action_type": str(type(client).__name__), } - devices.append({ - "device_id": device_id, - "namespace": namespace, - "device_key": device_key, - "is_online": is_online, - "machine_name": host_node.device_machine_names.get(device_id, machine_name), - "actions": actions, - }) + devices.append( + { + "device_id": device_id, + "namespace": namespace, + "device_key": device_key, + "is_online": is_online, + "machine_name": host_node.device_machine_names.get(device_id, machine_name), + "actions": actions, + } + ) logger.info(f"[WebSocketClient] Collected {len(devices)} devices for host_ready") except Exception as e: diff --git a/unilabos/config/config.py b/unilabos/config/config.py index f3dba5d..c91a07d 100644 --- a/unilabos/config/config.py +++ b/unilabos/config/config.py @@ -22,6 +22,7 @@ class BasicConfig: startup_json_path = None # 填写绝对路径 disable_browser = False # 禁止浏览器自动打开 port = 8002 # 本地HTTP服务 + check_mode = False # CI 检查模式,用于验证 registry 导入和文件一致性 # 'TRACE', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL' log_level: Literal["TRACE", "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"] = "DEBUG" diff --git a/unilabos/ros/x/__init__.py b/unilabos/devices/Qone_nmr/__init__.py similarity index 100% rename from unilabos/ros/x/__init__.py rename to unilabos/devices/Qone_nmr/__init__.py diff --git a/unilabos/devices/liquid_handling/liquid_handler_abstract.py b/unilabos/devices/liquid_handling/liquid_handler_abstract.py index d02129c..aa695a0 100644 --- a/unilabos/devices/liquid_handling/liquid_handler_abstract.py +++ b/unilabos/devices/liquid_handling/liquid_handler_abstract.py @@ -28,21 +28,40 @@ from pylabrobot.resources import ( Tip, ) +from unilabos.registry.placeholder_type import ResourceSlot from unilabos.ros.nodes.base_device_node import BaseROS2DeviceNode +from unilabos.resources.resource_tracker import ResourceTreeSet + + class SimpleReturn(TypedDict): samples: list volumes: list + +class SetLiquidReturn(TypedDict): + wells: list + volumes: list + + +class SetLiquidFromPlateReturn(TypedDict): + plate: list + wells: list + volumes: list + + class LiquidHandlerMiddleware(LiquidHandler): - def __init__(self, backend: LiquidHandlerBackend, deck: Deck, simulator: bool = False, channel_num: int = 8, **kwargs): + def __init__( + self, backend: LiquidHandlerBackend, deck: Deck, simulator: bool = False, channel_num: int = 8, **kwargs + ): self._simulator = simulator self.channel_num = channel_num self.pending_liquids_dict = {} joint_config = kwargs.get("joint_config", None) if simulator: if joint_config: - self._simulate_backend = UniLiquidHandlerRvizBackend(channel_num, kwargs["total_height"], 
- joint_config=joint_config, lh_device_id=deck.name) + self._simulate_backend = UniLiquidHandlerRvizBackend( + channel_num, kwargs["total_height"], joint_config=joint_config, lh_device_id=deck.name + ) else: self._simulate_backend = LiquidHandlerChatterboxBackend(channel_num) self._simulate_handler = LiquidHandlerAbstract(self._simulate_backend, deck, False) @@ -137,7 +156,7 @@ class LiquidHandlerMiddleware(LiquidHandler): ) await super().drop_tips(tip_spots, use_channels, offsets, allow_nonzero_volume, **backend_kwargs) self.pending_liquids_dict = {} - return + return async def return_tips( self, use_channels: Optional[list[int]] = None, allow_nonzero_volume: bool = False, **backend_kwargs @@ -159,11 +178,13 @@ class LiquidHandlerMiddleware(LiquidHandler): if not offsets or (isinstance(offsets, list) and len(offsets) != len(use_channels)): offsets = [Coordinate.zero()] * len(use_channels) if self._simulator: - return await self._simulate_handler.discard_tips(use_channels, allow_nonzero_volume, offsets, **backend_kwargs) + return await self._simulate_handler.discard_tips( + use_channels, allow_nonzero_volume, offsets, **backend_kwargs + ) await super().discard_tips(use_channels, allow_nonzero_volume, offsets, **backend_kwargs) self.pending_liquids_dict = {} - return - + return + def _check_containers(self, resources: Sequence[Resource]): super()._check_containers(resources) @@ -180,7 +201,6 @@ class LiquidHandlerMiddleware(LiquidHandler): **backend_kwargs, ): - if self._simulator: return await self._simulate_handler.aspirate( resources, @@ -208,15 +228,16 @@ class LiquidHandlerMiddleware(LiquidHandler): res_samples = [] res_volumes = [] for resource, volume, channel in zip(resources, vols, use_channels): - res_samples.append({"name": resource.name, "sample_uuid": resource.unilabos_extra.get("sample_uuid", None)}) + res_samples.append( + {"name": resource.name, "sample_uuid": resource.unilabos_extra.get("sample_uuid", None)} + ) res_volumes.append(volume) self.pending_liquids_dict[channel] = { "sample_uuid": resource.unilabos_extra.get("sample_uuid", None), - "volume": volume + "volume": volume, } return SimpleReturn(samples=res_samples, volumes=res_volumes) - async def dispense( self, resources: Sequence[Container], @@ -261,7 +282,7 @@ class LiquidHandlerMiddleware(LiquidHandler): res_volumes.append(volume) return SimpleReturn(samples=res_samples, volumes=res_volumes) - + async def transfer( self, source: Well, @@ -578,10 +599,18 @@ class LiquidHandlerMiddleware(LiquidHandler): class LiquidHandlerAbstract(LiquidHandlerMiddleware): """Extended LiquidHandler with additional operations.""" + support_touch_tip = True _ros_node: BaseROS2DeviceNode - def __init__(self, backend: LiquidHandlerBackend, deck: Deck, simulator: bool=False, channel_num:int = 8, total_height:float = 310): + def __init__( + self, + backend: LiquidHandlerBackend, + deck: Deck, + simulator: bool = False, + channel_num: int = 8, + total_height: float = 310, + ): """Initialize a LiquidHandler. 
Args: @@ -605,6 +634,7 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): module_name = ".".join(components[:-1]) try: import importlib + mod = importlib.import_module(module_name) except ImportError: mod = None @@ -614,6 +644,7 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): # Try pylabrobot style import (if available) try: import pylabrobot + backend_cls = getattr(pylabrobot, type_str, None) except Exception: backend_cls = None @@ -631,16 +662,56 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): self._ros_node = ros_node @classmethod - def set_liquid(cls, wells: list[Well], liquid_names: list[str], volumes: list[float]) -> SimpleReturn: - """Set the liquid in a well.""" - res_samples = [] + def set_liquid(cls, wells: list[Well], liquid_names: list[str], volumes: list[float]) -> SetLiquidReturn: + """Set the liquid in a well. + + 如果 liquid_names 和 volumes 为空,但 wells 不为空,直接返回 wells。 + """ res_volumes = [] + # 如果 liquid_names 和 volumes 都为空,直接返回 wells + if not liquid_names and not volumes: + return SetLiquidReturn( + wells=ResourceTreeSet.from_plr_resources(wells, known_newly_created=False).dump(), volumes=res_volumes # type: ignore + ) + for well, liquid_name, volume in zip(wells, liquid_names, volumes): well.set_liquids([(liquid_name, volume)]) # type: ignore - res_samples.append({"name": well.name, "sample_uuid": well.unilabos_extra.get("sample_uuid", None)}) res_volumes.append(volume) - - return SimpleReturn(samples=res_samples, volumes=res_volumes) + + return SetLiquidReturn( + wells=ResourceTreeSet.from_plr_resources(wells, known_newly_created=False).dump(), volumes=res_volumes # type: ignore + ) + + @classmethod + def set_liquid_from_plate( + cls, plate: ResourceSlot, well_names: list[str], liquid_names: list[str], volumes: list[float] + ) -> SetLiquidFromPlateReturn: + """Set the liquid in wells of a plate by well names (e.g., A1, A2, B3). 
+ + 如果 liquid_names 和 volumes 为空,但 plate 和 well_names 不为空,直接返回 plate 和 wells。 + """ + # 根据 well_names 获取对应的 Well 对象 + wells = [plate.get_well(name) for name in well_names] + res_volumes = [] + + # 如果 liquid_names 和 volumes 都为空,直接返回 + if not liquid_names and not volumes: + return SetLiquidFromPlateReturn( + plate=ResourceTreeSet.from_plr_resources([plate], known_newly_created=False).dump(), # type: ignore + wells=ResourceTreeSet.from_plr_resources(wells, known_newly_created=False).dump(), # type: ignore + volumes=res_volumes, + ) + + for well, liquid_name, volume in zip(wells, liquid_names, volumes): + well.set_liquids([(liquid_name, volume)]) # type: ignore + res_volumes.append(volume) + + return SetLiquidFromPlateReturn( + plate=ResourceTreeSet.from_plr_resources([plate], known_newly_created=False).dump(), # type: ignore + wells=ResourceTreeSet.from_plr_resources(wells, known_newly_created=False).dump(), # type: ignore + volumes=res_volumes, + ) + # --------------------------------------------------------------- # REMOVE LIQUID -------------------------------------------------- # --------------------------------------------------------------- @@ -655,7 +726,7 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): source_wells = self.group_info.get(source_group_name, []) target_wells = self.group_info.get(target_group_name, []) - + rack_info = dict() for child in self.deck.children: if issubclass(child.__class__, TipRack): @@ -666,17 +737,17 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): break else: rack_info[rack.name] = (rack, tip.maximal_volume - unit_volume) - + if len(rack_info) == 0: raise ValueError(f"No tip rack can support volume {unit_volume}.") - + rack_info = sorted(rack_info.items(), key=lambda x: x[1][1]) for child in self.deck.children: if child.name == rack_info[0][0]: target_rack = child target_rack = cast(TipRack, target_rack) available_tips = {} - for (idx, tipSpot) in enumerate(target_rack.get_all_items()): + for idx, tipSpot in enumerate(target_rack.get_all_items()): if tipSpot.has_tip(): available_tips[idx] = tipSpot continue @@ -684,10 +755,10 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): print("channel_num", self.channel_num) if self.channel_num == 8: - tip_prefix = list(available_tips.values())[0].name.split('_')[0] - colnum_list = [int(tip.name.split('_')[-1][1:]) for tip in available_tips.values()] + tip_prefix = list(available_tips.values())[0].name.split("_")[0] + colnum_list = [int(tip.name.split("_")[-1][1:]) for tip in available_tips.values()] available_cols = [colnum for colnum, count in dict(Counter(colnum_list)).items() if count == 8] - available_cols.sort() + available_cols.sort() available_tips_dict = {tip.name: tip for tip in available_tips.values()} tips_to_use = [available_tips_dict[f"{tip_prefix}_{chr(65 + i)}{available_cols[0]}"] for i in range(8)] print("tips_to_use", tips_to_use) @@ -698,16 +769,16 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): await self.dispense(target_wells, [unit_volume] * 8, use_channels=list(range(0, 8))) await self.discard_tips(use_channels=list(range(0, 8))) - elif self.channel_num == 1: - + elif self.channel_num == 1: + for num_well in range(len(target_wells)): - tip_to_use = available_tips[list(available_tips.keys())[num_well]] + tip_to_use = available_tips[list(available_tips.keys())[num_well]] print("tip_to_use", tip_to_use) await self.pick_up_tips([tip_to_use], use_channels=[0]) print("source_wells", source_wells) print("target_wells", target_wells) if len(source_wells) == 1: - await 
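The 8-channel branch of move_liquid above picks the left-most tip-rack column that still holds all 8 tips, relying on tip names of the form "<rack>_<row><column>". A hedged standalone sketch of that selection step, using plain strings instead of TipSpot objects:

from collections import Counter
from typing import List

def pick_full_column(tip_names: List[str]) -> List[str]:
    """tip_names lists spots that still have a tip, e.g. 'rack_A1'; returns one full column of 8 names."""
    prefix = tip_names[0].split("_")[0]
    columns = [int(name.split("_")[-1][1:]) for name in tip_names]
    full_columns = sorted(col for col, count in Counter(columns).items() if count == 8)
    if not full_columns:
        raise ValueError("No tip-rack column with all 8 tips left.")
    return [f"{prefix}_{chr(65 + row)}{full_columns[0]}" for row in range(8)]

# Example: column 1 is complete, column 2 has only one tip left.
spots = [f"tr_{chr(65 + r)}1" for r in range(8)] + ["tr_A2"]
assert pick_full_column(spots) == [f"tr_{chr(65 + r)}1" for r in range(8)]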
self.aspirate([source_wells[0]], [unit_volume], use_channels=[0]) + await self.aspirate([source_wells[0]], [unit_volume], use_channels=[0]) else: await self.aspirate([source_wells[num_well]], [unit_volume], use_channels=[0]) await self.dispense([target_wells[num_well]], [unit_volume], use_channels=[0]) @@ -729,7 +800,6 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): """Create a new protocol with the given metadata.""" pass - async def remove_liquid( self, vols: List[float], @@ -787,11 +857,12 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): await self.discard_tips() elif len(use_channels) == 8 and self.backend.num_channels == 8: - - + # 对于8个的情况,需要判断此时任务是不是能被8通道移液站来成功处理 if len(sources) % 8 != 0: - raise ValueError(f"Length of `sources` {len(sources)} must be a multiple of 8 for 8-channel mode.") + raise ValueError( + f"Length of `sources` {len(sources)} must be a multiple of 8 for 8-channel mode." + ) # 8个8个来取任务序列 @@ -800,18 +871,28 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): for _ in range(len(use_channels)): tip.extend(next(self.current_tip)) await self.pick_up_tips(tip) - current_targets = waste_liquid[i:i + 8] - current_reagent_sources = sources[i:i + 8] - current_asp_vols = vols[i:i + 8] - current_dis_vols = vols[i:i + 8] - current_asp_flow_rates = flow_rates[i:i + 8] if flow_rates else [None] * 8 - current_dis_flow_rates = flow_rates[-i*8-8:len(flow_rates)-i*8] if flow_rates else [None] * 8 - current_asp_offset = offsets[i:i + 8] if offsets else [None] * 8 - current_dis_offset = offsets[-i*8-8:len(offsets)-i*8] if offsets else [None] * 8 - current_asp_liquid_height = liquid_height[i:i + 8] if liquid_height else [None] * 8 - current_dis_liquid_height = liquid_height[-i*8-8:len(liquid_height)-i*8] if liquid_height else [None] * 8 - current_asp_blow_out_air_volume = blow_out_air_volume[i:i + 8] if blow_out_air_volume else [None] * 8 - current_dis_blow_out_air_volume = blow_out_air_volume[-i*8-8:len(blow_out_air_volume)-i*8] if blow_out_air_volume else [None] * 8 + current_targets = waste_liquid[i : i + 8] + current_reagent_sources = sources[i : i + 8] + current_asp_vols = vols[i : i + 8] + current_dis_vols = vols[i : i + 8] + current_asp_flow_rates = flow_rates[i : i + 8] if flow_rates else [None] * 8 + current_dis_flow_rates = ( + flow_rates[-i * 8 - 8 : len(flow_rates) - i * 8] if flow_rates else [None] * 8 + ) + current_asp_offset = offsets[i : i + 8] if offsets else [None] * 8 + current_dis_offset = offsets[-i * 8 - 8 : len(offsets) - i * 8] if offsets else [None] * 8 + current_asp_liquid_height = liquid_height[i : i + 8] if liquid_height else [None] * 8 + current_dis_liquid_height = ( + liquid_height[-i * 8 - 8 : len(liquid_height) - i * 8] if liquid_height else [None] * 8 + ) + current_asp_blow_out_air_volume = ( + blow_out_air_volume[i : i + 8] if blow_out_air_volume else [None] * 8 + ) + current_dis_blow_out_air_volume = ( + blow_out_air_volume[-i * 8 - 8 : len(blow_out_air_volume) - i * 8] + if blow_out_air_volume + else [None] * 8 + ) await self.aspirate( resources=current_reagent_sources, @@ -838,7 +919,7 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): if delays is not None and len(delays) > 1: await self.custom_delay(seconds=delays[1]) await self.touch_tip(current_targets) - await self.discard_tips() + await self.discard_tips() except Exception as e: traceback.print_exc() @@ -872,127 +953,136 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): # """A complete *add* (aspirate reagent → dispense into targets) operation.""" # # try: - 
if is_96_well: - pass # This mode is not verified. - else: - if len(asp_vols) != len(targets): - raise ValueError(f"Length of `asp_vols` {len(asp_vols)} must match `targets` {len(targets)}.") - # 首先应该对任务分组,然后每次1个/8个进行操作处理 - if len(use_channels) == 1: - for _ in range(len(targets)): - tip = [] - for x in range(len(use_channels)): - tip.extend(next(self.current_tip)) - await self.pick_up_tips(tip) + if is_96_well: + pass # This mode is not verified. + else: + if len(asp_vols) != len(targets): + raise ValueError(f"Length of `asp_vols` {len(asp_vols)} must match `targets` {len(targets)}.") + # 首先应该对任务分组,然后每次1个/8个进行操作处理 + if len(use_channels) == 1: + for _ in range(len(targets)): + tip = [] + for x in range(len(use_channels)): + tip.extend(next(self.current_tip)) + await self.pick_up_tips(tip) - await self.aspirate( - resources=[reagent_sources[_]], - vols=[asp_vols[_]], - use_channels=use_channels, - flow_rates=[flow_rates[0]] if flow_rates else None, - offsets=[offsets[0]] if offsets else None, - liquid_height=[liquid_height[0]] if liquid_height else None, - blow_out_air_volume=[blow_out_air_volume[0]] if blow_out_air_volume else None, - spread=spread, + await self.aspirate( + resources=[reagent_sources[_]], + vols=[asp_vols[_]], + use_channels=use_channels, + flow_rates=[flow_rates[0]] if flow_rates else None, + offsets=[offsets[0]] if offsets else None, + liquid_height=[liquid_height[0]] if liquid_height else None, + blow_out_air_volume=[blow_out_air_volume[0]] if blow_out_air_volume else None, + spread=spread, + ) + + if delays is not None: + await self.custom_delay(seconds=delays[0]) + await self.dispense( + resources=[targets[_]], + vols=[dis_vols[_]], + use_channels=use_channels, + flow_rates=[flow_rates[1]] if flow_rates else None, + offsets=[offsets[1]] if offsets else None, + blow_out_air_volume=[blow_out_air_volume[1]] if blow_out_air_volume else None, + liquid_height=[liquid_height[1]] if liquid_height else None, + spread=spread, + ) + + if delays is not None and len(delays) > 1: + await self.custom_delay(seconds=delays[1]) + # 只有在 mix_time 有效时才调用 mix + if mix_time is not None and mix_time > 0: + await self.mix( + targets=[targets[_]], + mix_time=mix_time, + mix_vol=mix_vol, + offsets=offsets if offsets else None, + height_to_bottom=mix_liquid_height if mix_liquid_height else None, + mix_rate=mix_rate if mix_rate else None, ) - - if delays is not None: - await self.custom_delay(seconds=delays[0]) - await self.dispense( - resources=[targets[_]], - vols=[dis_vols[_]], - use_channels=use_channels, - flow_rates=[flow_rates[1]] if flow_rates else None, - offsets=[offsets[1]] if offsets else None, - blow_out_air_volume=[blow_out_air_volume[1]] if blow_out_air_volume else None, - liquid_height=[liquid_height[1]] if liquid_height else None, - spread=spread, + if delays is not None and len(delays) > 1: + await self.custom_delay(seconds=delays[1]) + await self.touch_tip(targets[_]) + await self.discard_tips() + + elif len(use_channels) == 8: + # 对于8个的情况,需要判断此时任务是不是能被8通道移液站来成功处理 + if len(targets) % 8 != 0: + raise ValueError(f"Length of `targets` {len(targets)} must be a multiple of 8 for 8-channel mode.") + + for i in range(0, len(targets), 8): + tip = [] + for _ in range(len(use_channels)): + tip.extend(next(self.current_tip)) + await self.pick_up_tips(tip) + current_targets = targets[i : i + 8] + current_reagent_sources = reagent_sources[i : i + 8] + current_asp_vols = asp_vols[i : i + 8] + current_dis_vols = dis_vols[i : i + 8] + current_asp_flow_rates = flow_rates[i : i + 8] if 
flow_rates else [None] * 8 + current_dis_flow_rates = ( + flow_rates[-i * 8 - 8 : len(flow_rates) - i * 8] if flow_rates else [None] * 8 + ) + current_asp_offset = offsets[i : i + 8] if offsets else [None] * 8 + current_dis_offset = offsets[-i * 8 - 8 : len(offsets) - i * 8] if offsets else [None] * 8 + current_asp_liquid_height = liquid_height[i : i + 8] if liquid_height else [None] * 8 + current_dis_liquid_height = ( + liquid_height[-i * 8 - 8 : len(liquid_height) - i * 8] if liquid_height else [None] * 8 + ) + current_asp_blow_out_air_volume = ( + blow_out_air_volume[i : i + 8] if blow_out_air_volume else [None] * 8 + ) + current_dis_blow_out_air_volume = ( + blow_out_air_volume[-i * 8 - 8 : len(blow_out_air_volume) - i * 8] + if blow_out_air_volume + else [None] * 8 + ) + + await self.aspirate( + resources=current_reagent_sources, + vols=current_asp_vols, + use_channels=use_channels, + flow_rates=current_asp_flow_rates, + offsets=current_asp_offset, + liquid_height=current_asp_liquid_height, + blow_out_air_volume=current_asp_blow_out_air_volume, + spread=spread, + ) + if delays is not None: + await self.custom_delay(seconds=delays[0]) + await self.dispense( + resources=current_targets, + vols=current_dis_vols, + use_channels=use_channels, + flow_rates=current_dis_flow_rates, + offsets=current_dis_offset, + liquid_height=current_dis_liquid_height, + blow_out_air_volume=current_dis_blow_out_air_volume, + spread=spread, + ) + if delays is not None and len(delays) > 1: + await self.custom_delay(seconds=delays[1]) + + # 只有在 mix_time 有效时才调用 mix + if mix_time is not None and mix_time > 0: + await self.mix( + targets=current_targets, + mix_time=mix_time, + mix_vol=mix_vol, + offsets=offsets if offsets else None, + height_to_bottom=mix_liquid_height if mix_liquid_height else None, + mix_rate=mix_rate if mix_rate else None, ) + if delays is not None and len(delays) > 1: + await self.custom_delay(seconds=delays[1]) + await self.touch_tip(current_targets) + await self.discard_tips() - if delays is not None and len(delays) > 1: - await self.custom_delay(seconds=delays[1]) - # 只有在 mix_time 有效时才调用 mix - if mix_time is not None and mix_time > 0: - await self.mix( - targets=[targets[_]], - mix_time=mix_time, - mix_vol=mix_vol, - offsets=offsets if offsets else None, - height_to_bottom=mix_liquid_height if mix_liquid_height else None, - mix_rate=mix_rate if mix_rate else None, - ) - if delays is not None and len(delays) > 1: - await self.custom_delay(seconds=delays[1]) - await self.touch_tip(targets[_]) - await self.discard_tips() - - elif len(use_channels) == 8: - # 对于8个的情况,需要判断此时任务是不是能被8通道移液站来成功处理 - if len(targets) % 8 != 0: - raise ValueError(f"Length of `targets` {len(targets)} must be a multiple of 8 for 8-channel mode.") - - for i in range(0, len(targets), 8): - tip = [] - for _ in range(len(use_channels)): - tip.extend(next(self.current_tip)) - await self.pick_up_tips(tip) - current_targets = targets[i:i + 8] - current_reagent_sources = reagent_sources[i:i + 8] - current_asp_vols = asp_vols[i:i + 8] - current_dis_vols = dis_vols[i:i + 8] - current_asp_flow_rates = flow_rates[i:i + 8] if flow_rates else [None] * 8 - current_dis_flow_rates = flow_rates[-i*8-8:len(flow_rates)-i*8] if flow_rates else [None] * 8 - current_asp_offset = offsets[i:i + 8] if offsets else [None] * 8 - current_dis_offset = offsets[-i*8-8:len(offsets)-i*8] if offsets else [None] * 8 - current_asp_liquid_height = liquid_height[i:i + 8] if liquid_height else [None] * 8 - current_dis_liquid_height = 
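The reformatted aspirate-side slices above all follow one pattern: take the i-th block of eight per-channel parameters, or fall back to eight Nones when the optional list was not supplied. A small helper capturing that pattern, purely for reference; the diff keeps the slices inline:

from typing import List, Optional, Sequence

def chunk8(values: Optional[Sequence], i: int) -> List:
    """Return the block values[i:i+8], or eight Nones when the optional list is absent."""
    return list(values[i : i + 8]) if values else [None] * 8

# e.g. chunk8([10, 20, 30, 40, 50, 60, 70, 80, 90], 0) -> the first eight volumes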
liquid_height[-i*8-8:len(liquid_height)-i*8] if liquid_height else [None] * 8 - current_asp_blow_out_air_volume = blow_out_air_volume[i:i + 8] if blow_out_air_volume else [None] * 8 - current_dis_blow_out_air_volume = blow_out_air_volume[-i*8-8:len(blow_out_air_volume)-i*8] if blow_out_air_volume else [None] * 8 - - await self.aspirate( - resources=current_reagent_sources, - vols=current_asp_vols, - use_channels=use_channels, - flow_rates=current_asp_flow_rates, - offsets=current_asp_offset, - liquid_height=current_asp_liquid_height, - blow_out_air_volume=current_asp_blow_out_air_volume, - spread=spread, - ) - if delays is not None: - await self.custom_delay(seconds=delays[0]) - await self.dispense( - resources=current_targets, - vols=current_dis_vols, - use_channels=use_channels, - flow_rates=current_dis_flow_rates, - offsets=current_dis_offset, - liquid_height=current_dis_liquid_height, - blow_out_air_volume=current_dis_blow_out_air_volume, - spread=spread, - ) - if delays is not None and len(delays) > 1: - await self.custom_delay(seconds=delays[1]) - - # 只有在 mix_time 有效时才调用 mix - if mix_time is not None and mix_time > 0: - await self.mix( - targets=current_targets, - mix_time=mix_time, - mix_vol=mix_vol, - offsets=offsets if offsets else None, - height_to_bottom=mix_liquid_height if mix_liquid_height else None, - mix_rate=mix_rate if mix_rate else None, - ) - if delays is not None and len(delays) > 1: - await self.custom_delay(seconds=delays[1]) - await self.touch_tip(current_targets) - await self.discard_tips() - - - # except Exception as e: - # traceback.print_exc() - # raise RuntimeError(f"Liquid addition failed: {e}") from e + # except Exception as e: + # traceback.print_exc() + # raise RuntimeError(f"Liquid addition failed: {e}") from e # --------------------------------------------------------------- # TRANSFER LIQUID ------------------------------------------------ @@ -1050,12 +1140,12 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): Number of mix cycles. If *None* (default) no mixing occurs regardless of mix_stage. """ - + # 确保 use_channels 有默认值 if use_channels is None: # 默认使用设备所有通道(例如 8 通道移液站默认就是 0-7) use_channels = list(range(self.channel_num)) if self.channel_num > 0 else [0] - + if is_96_well: pass # This mode is not verified. 
else: @@ -1064,7 +1154,7 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): asp_vols = [float(asp_vols)] else: asp_vols = [float(v) for v in asp_vols] - + if isinstance(dis_vols, (int, float)): dis_vols = [float(dis_vols)] else: @@ -1081,37 +1171,79 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): pass if mix_times is not None: mix_times = int(mix_times) - + # 识别传输模式(mix_times 为 None 也应该能正常移液,只是不做 mix) num_sources = len(sources) num_targets = len(targets) - + if num_sources == 1 and num_targets > 1: # 模式1: 一对多 (1 source -> N targets) await self._transfer_one_to_many( - sources[0], targets, tip_racks, use_channels, - asp_vols, dis_vols, asp_flow_rates, dis_flow_rates, - offsets, touch_tip, liquid_height, blow_out_air_volume, - spread, mix_stage, mix_times, mix_vol, mix_rate, - mix_liquid_height, delays + sources[0], + targets, + tip_racks, + use_channels, + asp_vols, + dis_vols, + asp_flow_rates, + dis_flow_rates, + offsets, + touch_tip, + liquid_height, + blow_out_air_volume, + spread, + mix_stage, + mix_times, + mix_vol, + mix_rate, + mix_liquid_height, + delays, ) elif num_sources > 1 and num_targets == 1: # 模式2: 多对一 (N sources -> 1 target) await self._transfer_many_to_one( - sources, targets[0], tip_racks, use_channels, - asp_vols, dis_vols, asp_flow_rates, dis_flow_rates, - offsets, touch_tip, liquid_height, blow_out_air_volume, - spread, mix_stage, mix_times, mix_vol, mix_rate, - mix_liquid_height, delays + sources, + targets[0], + tip_racks, + use_channels, + asp_vols, + dis_vols, + asp_flow_rates, + dis_flow_rates, + offsets, + touch_tip, + liquid_height, + blow_out_air_volume, + spread, + mix_stage, + mix_times, + mix_vol, + mix_rate, + mix_liquid_height, + delays, ) elif num_sources == num_targets: # 模式3: 一对一 (N sources -> N targets) await self._transfer_one_to_one( - sources, targets, tip_racks, use_channels, - asp_vols, dis_vols, asp_flow_rates, dis_flow_rates, - offsets, touch_tip, liquid_height, blow_out_air_volume, - spread, mix_stage, mix_times, mix_vol, mix_rate, - mix_liquid_height, delays + sources, + targets, + tip_racks, + use_channels, + asp_vols, + dis_vols, + asp_flow_rates, + dis_flow_rates, + offsets, + touch_tip, + liquid_height, + blow_out_air_volume, + spread, + mix_stage, + mix_times, + mix_vol, + mix_rate, + mix_liquid_height, + delays, ) else: raise ValueError( @@ -1174,7 +1306,9 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): flow_rates=[asp_flow_rates[_]] if asp_flow_rates and len(asp_flow_rates) > _ else None, offsets=[offsets[_]] if offsets and len(offsets) > _ else None, liquid_height=[liquid_height[_]] if liquid_height and len(liquid_height) > _ else None, - blow_out_air_volume=[blow_out_air_volume[_]] if blow_out_air_volume and len(blow_out_air_volume) > _ else None, + blow_out_air_volume=( + [blow_out_air_volume[_]] if blow_out_air_volume and len(blow_out_air_volume) > _ else None + ), spread=spread, ) if delays is not None: @@ -1185,7 +1319,9 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): use_channels=use_channels, flow_rates=[dis_flow_rates[_]] if dis_flow_rates and len(dis_flow_rates) > _ else None, offsets=[offsets[_]] if offsets and len(offsets) > _ else None, - blow_out_air_volume=[blow_out_air_volume[_]] if blow_out_air_volume and len(blow_out_air_volume) > _ else None, + blow_out_air_volume=( + [blow_out_air_volume[_]] if blow_out_air_volume and len(blow_out_air_volume) > _ else None + ), liquid_height=[liquid_height[_]] if liquid_height and len(liquid_height) > _ else None, spread=spread, ) @@ -1214,18 
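transfer_liquid above routes to one of three helpers purely from the source and target counts. The same decision, written out as a tiny standalone function for reference (illustration only, not part of the API):

def pick_transfer_mode(num_sources: int, num_targets: int) -> str:
    if num_sources == 1 and num_targets > 1:
        return "one_to_many"   # split one source across several targets
    if num_sources > 1 and num_targets == 1:
        return "many_to_one"   # pool several sources into a single target
    if num_sources == num_targets:
        return "one_to_one"    # pairwise transfer, optionally 8 channels at a time
    raise ValueError(f"Unsupported combination: {num_sources} sources -> {num_targets} targets")

# e.g. pick_transfer_mode(1, 96) -> "one_to_many"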
+1350,18 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): for _ in range(len(use_channels)): tip.extend(next(self.current_tip)) await self.pick_up_tips(tip) - current_targets = targets[i:i + 8] - current_reagent_sources = sources[i:i + 8] - current_asp_vols = asp_vols[i:i + 8] - current_dis_vols = dis_vols[i:i + 8] - current_asp_flow_rates = asp_flow_rates[i:i + 8] if asp_flow_rates else None - current_asp_offset = offsets[i:i + 8] if offsets else [None] * 8 - current_dis_offset = offsets[i:i + 8] if offsets else [None] * 8 - current_asp_liquid_height = liquid_height[i:i + 8] if liquid_height else [None] * 8 - current_dis_liquid_height = liquid_height[i:i + 8] if liquid_height else [None] * 8 - current_asp_blow_out_air_volume = blow_out_air_volume[i:i + 8] if blow_out_air_volume else [None] * 8 - current_dis_blow_out_air_volume = blow_out_air_volume[i:i + 8] if blow_out_air_volume else [None] * 8 - current_dis_flow_rates = dis_flow_rates[i:i + 8] if dis_flow_rates else None + current_targets = targets[i : i + 8] + current_reagent_sources = sources[i : i + 8] + current_asp_vols = asp_vols[i : i + 8] + current_dis_vols = dis_vols[i : i + 8] + current_asp_flow_rates = asp_flow_rates[i : i + 8] if asp_flow_rates else None + current_asp_offset = offsets[i : i + 8] if offsets else [None] * 8 + current_dis_offset = offsets[i : i + 8] if offsets else [None] * 8 + current_asp_liquid_height = liquid_height[i : i + 8] if liquid_height else [None] * 8 + current_dis_liquid_height = liquid_height[i : i + 8] if liquid_height else [None] * 8 + current_asp_blow_out_air_volume = blow_out_air_volume[i : i + 8] if blow_out_air_volume else [None] * 8 + current_dis_blow_out_air_volume = blow_out_air_volume[i : i + 8] if blow_out_air_volume else [None] * 8 + current_dis_flow_rates = dis_flow_rates[i : i + 8] if dis_flow_rates else None if mix_stage in ["before", "both"] and mix_times is not None and mix_times > 0: await self.mix( @@ -1275,7 +1411,7 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): if delays is not None and len(delays) > 1: await self.custom_delay(seconds=delays[1]) await self.touch_tip(current_targets) - await self.discard_tips([0,1,2,3,4,5,6,7]) + await self.discard_tips([0, 1, 2, 3, 4, 5, 6, 7]) async def _transfer_one_to_many( self, @@ -1307,7 +1443,7 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): asp_vol = asp_vols[0] if asp_vols[0] >= total_asp_vol else total_asp_vol else: raise ValueError("For one-to-many mode, `asp_vols` should be a single value or list with one element.") - + if len(dis_vols) != len(targets): raise ValueError(f"Length of `dis_vols` {len(dis_vols)} must match `targets` {len(targets)}.") @@ -1324,7 +1460,7 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): targets=[target], mix_time=mix_times, mix_vol=mix_vol, - offsets=offsets[idx:idx + 1] if offsets and len(offsets) > idx else None, + offsets=offsets[idx : idx + 1] if offsets and len(offsets) > idx else None, height_to_bottom=mix_liquid_height if mix_liquid_height else None, mix_rate=mix_rate if mix_rate else None, ) @@ -1337,13 +1473,15 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): flow_rates=[asp_flow_rates[0]] if asp_flow_rates and len(asp_flow_rates) > 0 else None, offsets=[offsets[0]] if offsets and len(offsets) > 0 else None, liquid_height=[liquid_height[0]] if liquid_height and len(liquid_height) > 0 else None, - blow_out_air_volume=[blow_out_air_volume[0]] if blow_out_air_volume and len(blow_out_air_volume) > 0 else None, + blow_out_air_volume=( + [blow_out_air_volume[0]] 
if blow_out_air_volume and len(blow_out_air_volume) > 0 else None + ), spread=spread, ) - + if delays is not None: await self.custom_delay(seconds=delays[0]) - + # 分多次分液到不同的目标容器 for idx, target in enumerate(targets): await self.dispense( @@ -1352,7 +1490,9 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): use_channels=use_channels, flow_rates=[dis_flow_rates[idx]] if dis_flow_rates and len(dis_flow_rates) > idx else None, offsets=[offsets[idx]] if offsets and len(offsets) > idx else None, - blow_out_air_volume=[blow_out_air_volume[idx]] if blow_out_air_volume and len(blow_out_air_volume) > idx else None, + blow_out_air_volume=( + [blow_out_air_volume[idx]] if blow_out_air_volume and len(blow_out_air_volume) > idx else None + ), liquid_height=[liquid_height[idx]] if liquid_height and len(liquid_height) > idx else None, spread=spread, ) @@ -1363,46 +1503,54 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): targets=[target], mix_time=mix_times, mix_vol=mix_vol, - offsets=offsets[idx:idx+1] if offsets else None, + offsets=offsets[idx : idx + 1] if offsets else None, height_to_bottom=mix_liquid_height if mix_liquid_height else None, mix_rate=mix_rate if mix_rate else None, ) if touch_tip: await self.touch_tip([target]) - + await self.discard_tips(use_channels=use_channels) - + elif len(use_channels) == 8: # 8通道模式:需要确保目标数量是8的倍数 if len(targets) % 8 != 0: raise ValueError(f"For 8-channel mode, number of targets {len(targets)} must be a multiple of 8.") - + # 每次处理8个目标 for i in range(0, len(targets), 8): tip = [] for _ in range(len(use_channels)): tip.extend(next(self.current_tip)) await self.pick_up_tips(tip) - - current_targets = targets[i:i + 8] - current_dis_vols = dis_vols[i:i + 8] - + + current_targets = targets[i : i + 8] + current_dis_vols = dis_vols[i : i + 8] + # 8个通道都从同一个源容器吸液,每个通道的吸液体积等于对应的分液体积 - current_asp_flow_rates = asp_flow_rates[0:1] * 8 if asp_flow_rates and len(asp_flow_rates) > 0 else None + current_asp_flow_rates = ( + asp_flow_rates[0:1] * 8 if asp_flow_rates and len(asp_flow_rates) > 0 else None + ) current_asp_offset = offsets[0:1] * 8 if offsets and len(offsets) > 0 else [None] * 8 - current_asp_liquid_height = liquid_height[0:1] * 8 if liquid_height and len(liquid_height) > 0 else [None] * 8 - current_asp_blow_out_air_volume = blow_out_air_volume[0:1] * 8 if blow_out_air_volume and len(blow_out_air_volume) > 0 else [None] * 8 - + current_asp_liquid_height = ( + liquid_height[0:1] * 8 if liquid_height and len(liquid_height) > 0 else [None] * 8 + ) + current_asp_blow_out_air_volume = ( + blow_out_air_volume[0:1] * 8 + if blow_out_air_volume and len(blow_out_air_volume) > 0 + else [None] * 8 + ) + if mix_stage in ["before", "both"] and mix_times is not None and mix_times > 0: await self.mix( targets=current_targets, mix_time=mix_times, mix_vol=mix_vol, - offsets=offsets[i:i + 8] if offsets else None, + offsets=offsets[i : i + 8] if offsets else None, height_to_bottom=mix_liquid_height if mix_liquid_height else None, mix_rate=mix_rate if mix_rate else None, ) - + # 从源容器吸液(8个通道都从同一个源,但每个通道的吸液体积不同) await self.aspirate( resources=[source] * 8, # 8个通道都从同一个源 @@ -1414,16 +1562,16 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): blow_out_air_volume=current_asp_blow_out_air_volume, spread=spread, ) - + if delays is not None: await self.custom_delay(seconds=delays[0]) - + # 分液到8个目标 - current_dis_flow_rates = dis_flow_rates[i:i + 8] if dis_flow_rates else None - current_dis_offset = offsets[i:i + 8] if offsets else [None] * 8 - current_dis_liquid_height = 
liquid_height[i:i + 8] if liquid_height else [None] * 8 - current_dis_blow_out_air_volume = blow_out_air_volume[i:i + 8] if blow_out_air_volume else [None] * 8 - + current_dis_flow_rates = dis_flow_rates[i : i + 8] if dis_flow_rates else None + current_dis_offset = offsets[i : i + 8] if offsets else [None] * 8 + current_dis_liquid_height = liquid_height[i : i + 8] if liquid_height else [None] * 8 + current_dis_blow_out_air_volume = blow_out_air_volume[i : i + 8] if blow_out_air_volume else [None] * 8 + await self.dispense( resources=current_targets, vols=current_dis_vols, @@ -1434,10 +1582,10 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): liquid_height=current_dis_liquid_height, spread=spread, ) - + if delays is not None and len(delays) > 1: await self.custom_delay(seconds=delays[1]) - + if mix_stage in ["after", "both"] and mix_times is not None and mix_times > 0: await self.mix( targets=current_targets, @@ -1447,11 +1595,11 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): height_to_bottom=mix_liquid_height if mix_liquid_height else None, mix_rate=mix_rate if mix_rate else None, ) - + if touch_tip: await self.touch_tip(current_targets) - - await self.discard_tips([0,1,2,3,4,5,6,7]) + + await self.discard_tips([0, 1, 2, 3, 4, 5, 6, 7]) async def _transfer_many_to_one( self, @@ -1479,7 +1627,7 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): # 验证和扩展体积参数 if len(asp_vols) != len(sources): raise ValueError(f"Length of `asp_vols` {len(asp_vols)} must match `sources` {len(sources)}.") - + # 支持两种模式: # 1. dis_vols 为单个值:所有源汇总,使用总吸液体积或指定分液体积 # 2. dis_vols 长度等于 asp_vols:每个源按不同比例分液(按比例混合) @@ -1509,7 +1657,7 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): height_to_bottom=mix_liquid_height if mix_liquid_height else None, mix_rate=mix_rate if mix_rate else None, ) - + # 从每个源容器吸液并分液到目标容器 for idx, source in enumerate(sources): tip = [] @@ -1524,13 +1672,15 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): flow_rates=[asp_flow_rates[idx]] if asp_flow_rates and len(asp_flow_rates) > idx else None, offsets=[offsets[idx]] if offsets and len(offsets) > idx else None, liquid_height=[liquid_height[idx]] if liquid_height and len(liquid_height) > idx else None, - blow_out_air_volume=[blow_out_air_volume[idx]] if blow_out_air_volume and len(blow_out_air_volume) > idx else None, + blow_out_air_volume=( + [blow_out_air_volume[idx]] if blow_out_air_volume and len(blow_out_air_volume) > idx else None + ), spread=spread, ) - + if delays is not None: await self.custom_delay(seconds=delays[0]) - + # 分液到目标容器 if use_proportional_mixing: # 按不同比例混合:使用对应的 dis_vols @@ -1538,15 +1688,19 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): dis_flow_rate = dis_flow_rates[idx] if dis_flow_rates and len(dis_flow_rates) > idx else None dis_offset = offsets[idx] if offsets and len(offsets) > idx else None dis_liquid_height = liquid_height[idx] if liquid_height and len(liquid_height) > idx else None - dis_blow_out = blow_out_air_volume[idx] if blow_out_air_volume and len(blow_out_air_volume) > idx else None + dis_blow_out = ( + blow_out_air_volume[idx] if blow_out_air_volume and len(blow_out_air_volume) > idx else None + ) else: # 标准模式:分液体积等于吸液体积 dis_vol = asp_vols[idx] dis_flow_rate = dis_flow_rates[0] if dis_flow_rates and len(dis_flow_rates) > 0 else None dis_offset = offsets[0] if offsets and len(offsets) > 0 else None dis_liquid_height = liquid_height[0] if liquid_height and len(liquid_height) > 0 else None - dis_blow_out = blow_out_air_volume[0] if blow_out_air_volume and 
len(blow_out_air_volume) > 0 else None - + dis_blow_out = ( + blow_out_air_volume[0] if blow_out_air_volume and len(blow_out_air_volume) > 0 else None + ) + await self.dispense( resources=[target], vols=[dis_vol], @@ -1557,12 +1711,12 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): liquid_height=[dis_liquid_height] if dis_liquid_height is not None else None, spread=spread, ) - + if delays is not None and len(delays) > 1: await self.custom_delay(seconds=delays[1]) - + await self.discard_tips(use_channels=use_channels) - + # 最后在目标容器中混合(如果需要) if mix_stage in ["after", "both"] and mix_times is not None and mix_times > 0: await self.mix( @@ -1573,15 +1727,15 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): height_to_bottom=mix_liquid_height if mix_liquid_height else None, mix_rate=mix_rate if mix_rate else None, ) - + if touch_tip: await self.touch_tip([target]) - + elif len(use_channels) == 8: # 8通道模式:需要确保源数量是8的倍数 if len(sources) % 8 != 0: raise ValueError(f"For 8-channel mode, number of sources {len(sources)} must be a multiple of 8.") - + # 每次处理8个源 if mix_stage in ["before", "both"] and mix_times is not None and mix_times > 0: await self.mix( @@ -1598,14 +1752,14 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): for _ in range(len(use_channels)): tip.extend(next(self.current_tip)) await self.pick_up_tips(tip) - - current_sources = sources[i:i + 8] - current_asp_vols = asp_vols[i:i + 8] - current_asp_flow_rates = asp_flow_rates[i:i + 8] if asp_flow_rates else None - current_asp_offset = offsets[i:i + 8] if offsets else [None] * 8 - current_asp_liquid_height = liquid_height[i:i + 8] if liquid_height else [None] * 8 - current_asp_blow_out_air_volume = blow_out_air_volume[i:i + 8] if blow_out_air_volume else [None] * 8 - + + current_sources = sources[i : i + 8] + current_asp_vols = asp_vols[i : i + 8] + current_asp_flow_rates = asp_flow_rates[i : i + 8] if asp_flow_rates else None + current_asp_offset = offsets[i : i + 8] if offsets else [None] * 8 + current_asp_liquid_height = liquid_height[i : i + 8] if liquid_height else [None] * 8 + current_asp_blow_out_air_volume = blow_out_air_volume[i : i + 8] if blow_out_air_volume else [None] * 8 + # 从8个源容器吸液 await self.aspirate( resources=current_sources, @@ -1617,26 +1771,30 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): liquid_height=current_asp_liquid_height, spread=spread, ) - + if delays is not None: await self.custom_delay(seconds=delays[0]) - + # 分液到目标容器(每个通道分液到同一个目标) if use_proportional_mixing: # 按比例混合:使用对应的 dis_vols - current_dis_vols = dis_vols[i:i + 8] - current_dis_flow_rates = dis_flow_rates[i:i + 8] if dis_flow_rates else None - current_dis_offset = offsets[i:i + 8] if offsets else [None] * 8 - current_dis_liquid_height = liquid_height[i:i + 8] if liquid_height else [None] * 8 - current_dis_blow_out_air_volume = blow_out_air_volume[i:i + 8] if blow_out_air_volume else [None] * 8 + current_dis_vols = dis_vols[i : i + 8] + current_dis_flow_rates = dis_flow_rates[i : i + 8] if dis_flow_rates else None + current_dis_offset = offsets[i : i + 8] if offsets else [None] * 8 + current_dis_liquid_height = liquid_height[i : i + 8] if liquid_height else [None] * 8 + current_dis_blow_out_air_volume = ( + blow_out_air_volume[i : i + 8] if blow_out_air_volume else [None] * 8 + ) else: # 标准模式:每个通道分液体积等于其吸液体积 current_dis_vols = current_asp_vols current_dis_flow_rates = dis_flow_rates[0:1] * 8 if dis_flow_rates else None current_dis_offset = offsets[0:1] * 8 if offsets else [None] * 8 current_dis_liquid_height = 
liquid_height[0:1] * 8 if liquid_height else [None] * 8 - current_dis_blow_out_air_volume = blow_out_air_volume[0:1] * 8 if blow_out_air_volume else [None] * 8 - + current_dis_blow_out_air_volume = ( + blow_out_air_volume[0:1] * 8 if blow_out_air_volume else [None] * 8 + ) + await self.dispense( resources=[target] * 8, # 8个通道都分到同一个目标 vols=current_dis_vols, @@ -1647,12 +1805,12 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): liquid_height=current_dis_liquid_height, spread=spread, ) - + if delays is not None and len(delays) > 1: await self.custom_delay(seconds=delays[1]) - - await self.discard_tips([0,1,2,3,4,5,6,7]) - + + await self.discard_tips([0, 1, 2, 3, 4, 5, 6, 7]) + # 最后在目标容器中混合(如果需要) if mix_stage in ["after", "both"] and mix_times is not None and mix_times > 0: await self.mix( @@ -1663,7 +1821,7 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): height_to_bottom=mix_liquid_height if mix_liquid_height else None, mix_rate=mix_rate if mix_rate else None, ) - + if touch_tip: await self.touch_tip([target]) @@ -1671,7 +1829,6 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): # traceback.print_exc() # raise RuntimeError(f"Liquid addition failed: {e}") from e - # --------------------------------------------------------------- # Helper utilities # --------------------------------------------------------------- @@ -1692,7 +1849,6 @@ class LiquidHandlerAbstract(LiquidHandlerMiddleware): print(f"Current time: {time.strftime('%H:%M:%S')}") async def touch_tip(self, targets: Sequence[Container]): - """Touch the tip to the side of the well.""" if not self.support_touch_tip: diff --git a/unilabos/devices/liquid_handling/prcxi/prcxi.py b/unilabos/devices/liquid_handling/prcxi/prcxi.py index e0c7e80..4f96255 100644 --- a/unilabos/devices/liquid_handling/prcxi/prcxi.py +++ b/unilabos/devices/liquid_handling/prcxi/prcxi.py @@ -30,9 +30,30 @@ from pylabrobot.liquid_handling.standard import ( ResourceMove, ResourceDrop, ) -from pylabrobot.resources import ResourceHolder, ResourceStack, Tip, Deck, Plate, Well, TipRack, Resource, Container, Coordinate, TipSpot, Trash, PlateAdapter, TubeRack +from pylabrobot.resources import ( + ResourceHolder, + ResourceStack, + Tip, + Deck, + Plate, + Well, + TipRack, + Resource, + Container, + Coordinate, + TipSpot, + Trash, + PlateAdapter, + TubeRack, +) -from unilabos.devices.liquid_handling.liquid_handler_abstract import LiquidHandlerAbstract, SimpleReturn +from unilabos.devices.liquid_handling.liquid_handler_abstract import ( + LiquidHandlerAbstract, + SimpleReturn, + SetLiquidReturn, + SetLiquidFromPlateReturn, +) +from unilabos.registry.placeholder_type import ResourceSlot from unilabos.ros.nodes.base_device_node import BaseROS2DeviceNode @@ -80,6 +101,7 @@ class PRCXI9300Deck(Deck): self.slots[slot - 1] = resource super().assign_child_resource(resource, location=self.slot_locations[slot - 1]) + class PRCXI9300Container(Plate): """PRCXI 9300 的专用 Container 类,继承自 Plate,用于槽位定位和未知模块。 @@ -108,20 +130,29 @@ class PRCXI9300Container(Plate): def serialize_state(self) -> Dict[str, Dict[str, Any]]: data = super().serialize_state() data.update(self._unilabos_state) - return data + return data + + class PRCXI9300Plate(Plate): - """ + """ 专用孔板类: 1. 继承自 PLR 原生 Plate,保留所有物理特性。 2. 
增加 material_info 参数,用于在初始化时直接绑定 Unilab UUID。 """ - def __init__(self, name: str, size_x: float, size_y: float, size_z: float, - category: str = "plate", - ordered_items: collections.OrderedDict = None, - ordering: Optional[collections.OrderedDict] = None, - model: Optional[str] = None, - material_info: Optional[Dict[str, Any]] = None, - **kwargs): + + def __init__( + self, + name: str, + size_x: float, + size_y: float, + size_z: float, + category: str = "plate", + ordered_items: collections.OrderedDict = None, + ordering: Optional[collections.OrderedDict] = None, + model: Optional[str] = None, + material_info: Optional[Dict[str, Any]] = None, + **kwargs, + ): # 如果 ordered_items 不为 None,直接使用 if ordered_items is not None: items = ordered_items @@ -142,40 +173,34 @@ class PRCXI9300Plate(Plate): else: items = None ordering_param = None - + # 根据情况传递不同的参数 if items is not None: - super().__init__(name, size_x, size_y, size_z, - ordered_items=items, - category=category, - model=model, **kwargs) + super().__init__( + name, size_x, size_y, size_z, ordered_items=items, category=category, model=model, **kwargs + ) elif ordering_param is not None: # 传递 ordering 参数,让 Plate 自己创建 Well 对象 - super().__init__(name, size_x, size_y, size_z, - ordering=ordering_param, - category=category, - model=model, **kwargs) + super().__init__( + name, size_x, size_y, size_z, ordering=ordering_param, category=category, model=model, **kwargs + ) else: - super().__init__(name, size_x, size_y, size_z, - category=category, - model=model, **kwargs) - + super().__init__(name, size_x, size_y, size_z, category=category, model=model, **kwargs) + self._unilabos_state = {} if material_info: self._unilabos_state["Material"] = material_info - def load_state(self, state: Dict[str, Any]) -> None: super().load_state(state) self._unilabos_state = state - def serialize_state(self) -> Dict[str, Dict[str, Any]]: try: data = super().serialize_state() except AttributeError: data = {} - if hasattr(self, '_unilabos_state') and self._unilabos_state: + if hasattr(self, "_unilabos_state") and self._unilabos_state: safe_state = {} for k, v in self._unilabos_state.items(): # 如果是 Material 字典,深入检查 @@ -188,23 +213,32 @@ class PRCXI9300Plate(Plate): else: # 打印日志提醒(可选) # print(f"Warning: Removing non-serializable key {mk} from {self.name}") - pass + pass safe_state[k] = safe_material # 其他顶层属性也进行类型检查 elif isinstance(v, (str, int, float, bool, list, dict, type(None))): safe_state[k] = v - + data.update(safe_state) - return data # 其他顶层属性也进行类型检查 + return data # 其他顶层属性也进行类型检查 + + class PRCXI9300TipRack(TipRack): - """ 专用吸头盒类 """ - def __init__(self, name: str, size_x: float, size_y: float, size_z: float, - category: str = "tip_rack", - ordered_items: collections.OrderedDict = None, - ordering: Optional[collections.OrderedDict] = None, - model: Optional[str] = None, - material_info: Optional[Dict[str, Any]] = None, - **kwargs): + """专用吸头盒类""" + + def __init__( + self, + name: str, + size_x: float, + size_y: float, + size_z: float, + category: str = "tip_rack", + ordered_items: collections.OrderedDict = None, + ordering: Optional[collections.OrderedDict] = None, + model: Optional[str] = None, + material_info: Optional[Dict[str, Any]] = None, + **kwargs, + ): # 如果 ordered_items 不为 None,直接使用 if ordered_items is not None: items = ordered_items @@ -225,27 +259,23 @@ class PRCXI9300TipRack(TipRack): else: items = None ordering_param = None - + # 根据情况传递不同的参数 if items is not None: - super().__init__(name, size_x, size_y, size_z, - ordered_items=items, - 
category=category, - model=model, **kwargs) + super().__init__( + name, size_x, size_y, size_z, ordered_items=items, category=category, model=model, **kwargs + ) elif ordering_param is not None: # 传递 ordering 参数,让 TipRack 自己创建 Tip 对象 - super().__init__(name, size_x, size_y, size_z, - ordering=ordering_param, - category=category, - model=model, **kwargs) + super().__init__( + name, size_x, size_y, size_z, ordering=ordering_param, category=category, model=model, **kwargs + ) else: - super().__init__(name, size_x, size_y, size_z, - category=category, - model=model, **kwargs) + super().__init__(name, size_x, size_y, size_z, category=category, model=model, **kwargs) self._unilabos_state = {} if material_info: self._unilabos_state["Material"] = material_info - + def load_state(self, state: Dict[str, Any]) -> None: super().load_state(state) self._unilabos_state = state @@ -255,7 +285,7 @@ class PRCXI9300TipRack(TipRack): data = super().serialize_state() except AttributeError: data = {} - if hasattr(self, '_unilabos_state') and self._unilabos_state: + if hasattr(self, "_unilabos_state") and self._unilabos_state: safe_state = {} for k, v in self._unilabos_state.items(): # 如果是 Material 字典,深入检查 @@ -268,26 +298,33 @@ class PRCXI9300TipRack(TipRack): else: # 打印日志提醒(可选) # print(f"Warning: Removing non-serializable key {mk} from {self.name}") - pass + pass safe_state[k] = safe_material # 其他顶层属性也进行类型检查 elif isinstance(v, (str, int, float, bool, list, dict, type(None))): safe_state[k] = v - + data.update(safe_state) return data - + + class PRCXI9300Trash(Trash): """PRCXI 9300 的专用 Trash 类,继承自 Trash。 该类定义了 PRCXI 9300 的工作台布局和槽位信息。 """ - def __init__(self, name: str, size_x: float, size_y: float, size_z: float, - category: str = "trash", - material_info: Optional[Dict[str, Any]] = None, - **kwargs): - + def __init__( + self, + name: str, + size_x: float, + size_y: float, + size_z: float, + category: str = "trash", + material_info: Optional[Dict[str, Any]] = None, + **kwargs, + ): + if name != "trash": print(f"Warning: PRCXI9300Trash usually expects name='trash' for backend logic, but got '{name}'.") super().__init__(name, size_x, size_y, size_z, **kwargs) @@ -306,7 +343,7 @@ class PRCXI9300Trash(Trash): data = super().serialize_state() except AttributeError: data = {} - if hasattr(self, '_unilabos_state') and self._unilabos_state: + if hasattr(self, "_unilabos_state") and self._unilabos_state: safe_state = {} for k, v in self._unilabos_state.items(): # 如果是 Material 字典,深入检查 @@ -319,29 +356,37 @@ class PRCXI9300Trash(Trash): else: # 打印日志提醒(可选) # print(f"Warning: Removing non-serializable key {mk} from {self.name}") - pass + pass safe_state[k] = safe_material # 其他顶层属性也进行类型检查 elif isinstance(v, (str, int, float, bool, list, dict, type(None))): safe_state[k] = v - + data.update(safe_state) return data + class PRCXI9300TubeRack(TubeRack): """ 专用管架类:用于 EP 管架、试管架等。 继承自 PLR 的 TubeRack,并支持注入 material_info (UUID)。 """ - def __init__(self, name: str, size_x: float, size_y: float, size_z: float, - category: str = "tube_rack", - items: Optional[Dict[str, Any]] = None, - ordered_items: Optional[OrderedDict] = None, - ordering: Optional[OrderedDict] = None, - model: Optional[str] = None, - material_info: Optional[Dict[str, Any]] = None, - **kwargs): - + + def __init__( + self, + name: str, + size_x: float, + size_y: float, + size_z: float, + category: str = "tube_rack", + items: Optional[Dict[str, Any]] = None, + ordered_items: Optional[OrderedDict] = None, + ordering: Optional[OrderedDict] = None, + model: Optional[str] = 
None, + material_info: Optional[Dict[str, Any]] = None, + **kwargs, + ): + # 如果 ordered_items 不为 None,直接使用 if ordered_items is not None: items_to_pass = ordered_items @@ -367,24 +412,16 @@ class PRCXI9300TubeRack(TubeRack): else: items_to_pass = None ordering_param = None - + # 根据情况传递不同的参数 if items_to_pass is not None: - super().__init__(name, size_x, size_y, size_z, - ordered_items=items_to_pass, - model=model, - **kwargs) + super().__init__(name, size_x, size_y, size_z, ordered_items=items_to_pass, model=model, **kwargs) elif ordering_param is not None: # 传递 ordering 参数,让 TubeRack 自己创建 Tube 对象 - super().__init__(name, size_x, size_y, size_z, - ordering=ordering_param, - model=model, - **kwargs) + super().__init__(name, size_x, size_y, size_z, ordering=ordering_param, model=model, **kwargs) else: - super().__init__(name, size_x, size_y, size_z, - model=model, - **kwargs) - + super().__init__(name, size_x, size_y, size_z, model=model, **kwargs) + self._unilabos_state = {} if material_info: self._unilabos_state["Material"] = material_info @@ -394,7 +431,7 @@ class PRCXI9300TubeRack(TubeRack): data = super().serialize_state() except AttributeError: data = {} - if hasattr(self, '_unilabos_state') and self._unilabos_state: + if hasattr(self, "_unilabos_state") and self._unilabos_state: safe_state = {} for k, v in self._unilabos_state.items(): # 如果是 Material 字典,深入检查 @@ -407,33 +444,41 @@ class PRCXI9300TubeRack(TubeRack): else: # 打印日志提醒(可选) # print(f"Warning: Removing non-serializable key {mk} from {self.name}") - pass + pass safe_state[k] = safe_material # 其他顶层属性也进行类型检查 elif isinstance(v, (str, int, float, bool, list, dict, type(None))): safe_state[k] = v - + data.update(safe_state) return data + class PRCXI9300PlateAdapter(PlateAdapter): """ 专用板式适配器类:用于承载 Plate 的底座(如 PCR 适配器、磁吸架等)。 支持注入 material_info (UUID)。 """ - def __init__(self, name: str, size_x: float, size_y: float, size_z: float, - category: str = "plate_adapter", - model: Optional[str] = None, - material_info: Optional[Dict[str, Any]] = None, - # 参数给予默认值 (标准96孔板尺寸) - adapter_hole_size_x: float = 127.76, - adapter_hole_size_y: float = 85.48, - adapter_hole_size_z: float = 10.0, # 假设凹槽深度或板子放置高度 - dx: Optional[float] = None, - dy: Optional[float] = None, - dz: float = 0.0, # 默认Z轴偏移 - **kwargs): - + + def __init__( + self, + name: str, + size_x: float, + size_y: float, + size_z: float, + category: str = "plate_adapter", + model: Optional[str] = None, + material_info: Optional[Dict[str, Any]] = None, + # 参数给予默认值 (标准96孔板尺寸) + adapter_hole_size_x: float = 127.76, + adapter_hole_size_y: float = 85.48, + adapter_hole_size_z: float = 10.0, # 假设凹槽深度或板子放置高度 + dx: Optional[float] = None, + dy: Optional[float] = None, + dz: float = 0.0, # 默认Z轴偏移 + **kwargs, + ): + # 自动居中计算:如果未指定 dx/dy,则根据适配器尺寸和孔尺寸计算居中位置 if dx is None: dx = (size_x - adapter_hole_size_x) / 2 @@ -441,20 +486,20 @@ class PRCXI9300PlateAdapter(PlateAdapter): dy = (size_y - adapter_hole_size_y) / 2 super().__init__( - name=name, - size_x=size_x, - size_y=size_y, - size_z=size_z, + name=name, + size_x=size_x, + size_y=size_y, + size_z=size_z, dx=dx, dy=dy, dz=dz, adapter_hole_size_x=adapter_hole_size_x, adapter_hole_size_y=adapter_hole_size_y, adapter_hole_size_z=adapter_hole_size_z, - model=model, - **kwargs + model=model, + **kwargs, ) - + self._unilabos_state = {} if material_info: self._unilabos_state["Material"] = material_info @@ -464,7 +509,7 @@ class PRCXI9300PlateAdapter(PlateAdapter): data = super().serialize_state() except AttributeError: data = {} - if hasattr(self, 
'_unilabos_state') and self._unilabos_state: + if hasattr(self, "_unilabos_state") and self._unilabos_state: safe_state = {} for k, v in self._unilabos_state.items(): # 如果是 Material 字典,深入检查 @@ -477,15 +522,16 @@ class PRCXI9300PlateAdapter(PlateAdapter): else: # 打印日志提醒(可选) # print(f"Warning: Removing non-serializable key {mk} from {self.name}") - pass + pass safe_state[k] = safe_material # 其他顶层属性也进行类型检查 elif isinstance(v, (str, int, float, bool, list, dict, type(None))): safe_state[k] = v - + data.update(safe_state) return data + class PRCXI9300Handler(LiquidHandlerAbstract): support_touch_tip = False @@ -518,7 +564,9 @@ class PRCXI9300Handler(LiquidHandlerAbstract): if "Material" in child.children[0]._unilabos_state: number = int(child.name.replace("T", "")) tablets_info.append( - WorkTablets(Number=number, Code=f"T{number}", Material=child.children[0]._unilabos_state["Material"]) + WorkTablets( + Number=number, Code=f"T{number}", Material=child.children[0]._unilabos_state["Material"] + ) ) if is_9320: print("当前设备是9320") @@ -538,9 +586,14 @@ class PRCXI9300Handler(LiquidHandlerAbstract): super().post_init(ros_node) self._unilabos_backend.post_init(ros_node) - def set_liquid(self, wells: list[Well], liquid_names: list[str], volumes: list[float]) -> SimpleReturn: + def set_liquid(self, wells: list[Well], liquid_names: list[str], volumes: list[float]) -> SetLiquidReturn: return super().set_liquid(wells, liquid_names, volumes) + def set_liquid_from_plate( + self, plate: ResourceSlot, well_names: list[str], liquid_names: list[str], volumes: list[float] + ) -> SetLiquidFromPlateReturn: + return super().set_liquid_from_plate(plate, well_names, liquid_names, volumes) + def set_group(self, group_name: str, wells: List[Well], volumes: List[float]): return super().set_group(group_name, wells, volumes) @@ -799,7 +852,8 @@ class PRCXI9300Handler(LiquidHandlerAbstract): return await self._unilabos_backend.shaker_action(time, module_no, amplitude, is_wait) async def heater_action(self, temperature: float, time: int): - return await self._unilabos_backend.heater_action(temperature, time) + return await self._unilabos_backend.heater_action(temperature, time) + async def move_plate( self, plate: Plate, @@ -822,10 +876,11 @@ class PRCXI9300Handler(LiquidHandlerAbstract): drop_direction, pickup_direction, pickup_distance_from_top, - target_plate_number = to, + target_plate_number=to, **backend_kwargs, ) + class PRCXI9300Backend(LiquidHandlerBackend): """PRCXI 9300 的后端实现,继承自 LiquidHandlerBackend。 @@ -878,31 +933,28 @@ class PRCXI9300Backend(LiquidHandlerBackend): self.steps_todo_list.append(step) return step - async def pick_up_resource(self, pickup: ResourcePickup, **backend_kwargs): - - resource=pickup.resource - offset=pickup.offset - pickup_distance_from_top=pickup.pickup_distance_from_top - direction=pickup.direction + + resource = pickup.resource + offset = pickup.offset + pickup_distance_from_top = pickup.pickup_distance_from_top + direction = pickup.direction plate_number = int(resource.parent.name.replace("T", "")) is_whole_plate = True balance_height = 0 step = self.api_client.clamp_jaw_pick_up(plate_number, is_whole_plate, balance_height) - + self.steps_todo_list.append(step) return step async def drop_resource(self, drop: ResourceDrop, **backend_kwargs): - plate_number = None target_plate_number = backend_kwargs.get("target_plate_number", None) if target_plate_number is not None: plate_number = int(target_plate_number.name.replace("T", "")) - is_whole_plate = True balance_height = 0 if 
plate_number is None: @@ -911,7 +963,6 @@ class PRCXI9300Backend(LiquidHandlerBackend): self.steps_todo_list.append(step) return step - async def heater_action(self, temperature: float, time: int): print(f"\n\nHeater action: temperature={temperature}, time={time}\n\n") # return await self.api_client.heater_action(temperature, time) @@ -968,7 +1019,7 @@ class PRCXI9300Backend(LiquidHandlerBackend): error_code = self.api_client.get_error_code() if error_code: print(f"PRCXI9300 error code detected: {error_code}") - + # 清除错误代码 self.api_client.clear_error_code() print("PRCXI9300 error code cleared.") @@ -976,11 +1027,11 @@ class PRCXI9300Backend(LiquidHandlerBackend): # 执行重置 print("Starting PRCXI9300 reset...") self.api_client.call("IAutomation", "Reset") - + # 检查重置状态并等待完成 while not self.is_reset_ok: print("Waiting for PRCXI9300 to reset...") - if hasattr(self, '_ros_node') and self._ros_node is not None: + if hasattr(self, "_ros_node") and self._ros_node is not None: await self._ros_node.sleep(1) else: await asyncio.sleep(1) @@ -998,7 +1049,7 @@ class PRCXI9300Backend(LiquidHandlerBackend): """Pick up tips from the specified resource.""" # INSERT_YOUR_CODE # Ensure use_channels is converted to a list of ints if it's an array - if hasattr(use_channels, 'tolist'): + if hasattr(use_channels, "tolist"): _use_channels = use_channels.tolist() else: _use_channels = list(use_channels) if use_channels is not None else None @@ -1052,7 +1103,7 @@ class PRCXI9300Backend(LiquidHandlerBackend): async def drop_tips(self, ops: List[Drop], use_channels: List[int] = None): """Pick up tips from the specified resource.""" - if hasattr(use_channels, 'tolist'): + if hasattr(use_channels, "tolist"): _use_channels = use_channels.tolist() else: _use_channels = list(use_channels) if use_channels is not None else None @@ -1135,7 +1186,7 @@ class PRCXI9300Backend(LiquidHandlerBackend): none_keys: List[str] = [], ): """Mix liquid in the specified resources.""" - + plate_indexes = [] for op in targets: deck = op.parent.parent.parent @@ -1178,7 +1229,7 @@ class PRCXI9300Backend(LiquidHandlerBackend): async def aspirate(self, ops: List[SingleChannelAspiration], use_channels: List[int] = None): """Aspirate liquid from the specified resources.""" - if hasattr(use_channels, 'tolist'): + if hasattr(use_channels, "tolist"): _use_channels = use_channels.tolist() else: _use_channels = list(use_channels) if use_channels is not None else None @@ -1235,7 +1286,7 @@ class PRCXI9300Backend(LiquidHandlerBackend): async def dispense(self, ops: List[SingleChannelDispense], use_channels: List[int] = None): """Dispense liquid into the specified resources.""" - if hasattr(use_channels, 'tolist'): + if hasattr(use_channels, "tolist"): _use_channels = use_channels.tolist() else: _use_channels = list(use_channels) if use_channels is not None else None @@ -1416,7 +1467,6 @@ class PRCXI9300Api: time.sleep(1) return success - def call(self, service: str, method: str, params: Optional[list] = None) -> Any: payload = json.dumps( {"ServiceName": service, "MethodName": method, "Paramters": params or []}, separators=(",", ":") @@ -1543,7 +1593,7 @@ class PRCXI9300Api: assist_fun5: str = "", liquid_method: str = "NormalDispense", axis: str = "Left", - ) -> Dict[str, Any]: + ) -> Dict[str, Any]: return { "StepAxis": axis, "Function": "Imbibing", @@ -1621,7 +1671,7 @@ class PRCXI9300Api: assist_fun5: str = "", liquid_method: str = "NormalDispense", axis: str = "Left", - ) -> Dict[str, Any]: + ) -> Dict[str, Any]: return { "StepAxis": axis, "Function": 
"Blending", @@ -1681,11 +1731,11 @@ class PRCXI9300Api: "LiquidDispensingMethod": liquid_method, } - def clamp_jaw_pick_up(self, + def clamp_jaw_pick_up( + self, plate_no: int, is_whole_plate: bool, balance_height: int, - ) -> Dict[str, Any]: return { "StepAxis": "ClampingJaw", @@ -1695,7 +1745,7 @@ class PRCXI9300Api: "HoleRow": 1, "HoleCol": 1, "BalanceHeight": balance_height, - "PlateOrHoleNum": f"T{plate_no}" + "PlateOrHoleNum": f"T{plate_no}", } def clamp_jaw_drop( @@ -1703,7 +1753,6 @@ class PRCXI9300Api: plate_no: int, is_whole_plate: bool, balance_height: int, - ) -> Dict[str, Any]: return { "StepAxis": "ClampingJaw", @@ -1713,7 +1762,7 @@ class PRCXI9300Api: "HoleRow": 1, "HoleCol": 1, "BalanceHeight": balance_height, - "PlateOrHoleNum": f"T{plate_no}" + "PlateOrHoleNum": f"T{plate_no}", } def shaker_action(self, time: int, module_no: int, amplitude: int, is_wait: bool): @@ -1726,6 +1775,7 @@ class PRCXI9300Api: "AssistFun4": is_wait, } + class DefaultLayout: def __init__(self, product_name: str = "PRCXI9300"): @@ -2104,7 +2154,9 @@ if __name__ == "__main__": size_y=50, size_z=10, category="tip_rack", - ordered_items=collections.OrderedDict({k: f"{child_prefix}_{k}" for k, v in tip_racks["ordering"].items()}), + ordered_items=collections.OrderedDict( + {k: f"{child_prefix}_{k}" for k, v in tip_racks["ordering"].items()} + ), ) tip_rack_serialized = tip_rack.serialize() tip_rack_serialized["parent_name"] = deck.name @@ -2299,43 +2351,37 @@ if __name__ == "__main__": A = tree_to_list([resource_plr_to_ulab(deck)]) with open("deck.json", "w", encoding="utf-8") as f: - A.insert(0, { - "id": "PRCXI", - "name": "PRCXI", - "parent": None, - "type": "device", - "class": "liquid_handler.prcxi", - "position": { - "x": 0, - "y": 0, - "z": 0 - }, - "config": { - "deck": { - "_resource_child_name": "PRCXI_Deck", - "_resource_type": "unilabos.devices.liquid_handling.prcxi.prcxi:PRCXI9300Deck" + A.insert( + 0, + { + "id": "PRCXI", + "name": "PRCXI", + "parent": None, + "type": "device", + "class": "liquid_handler.prcxi", + "position": {"x": 0, "y": 0, "z": 0}, + "config": { + "deck": { + "_resource_child_name": "PRCXI_Deck", + "_resource_type": "unilabos.devices.liquid_handling.prcxi.prcxi:PRCXI9300Deck", + }, + "host": "192.168.0.121", + "port": 9999, + "timeout": 10.0, + "axis": "Right", + "channel_num": 1, + "setup": False, + "debug": True, + "simulator": True, + "matrix_id": "5de524d0-3f95-406c-86dd-f83626ebc7cb", + "is_9320": True, }, - "host": "192.168.0.121", - "port": 9999, - "timeout": 10.0, - "axis": "Right", - "channel_num": 1, - "setup": False, - "debug": True, - "simulator": True, - "matrix_id": "5de524d0-3f95-406c-86dd-f83626ebc7cb", - "is_9320": True + "data": {}, + "children": ["PRCXI_Deck"], }, - "data": {}, - "children": [ - "PRCXI_Deck" - ] - }) + ) A[1]["parent"] = "PRCXI" - json.dump({ - "nodes": A, - "links": [] - }, f, indent=4, ensure_ascii=False) + json.dump({"nodes": A, "links": []}, f, indent=4, ensure_ascii=False) handler = PRCXI9300Handler( deck=deck, @@ -2377,7 +2423,6 @@ if __name__ == "__main__": time.sleep(5) os._exit(0) - prcxi_api = PRCXI9300Api(host="192.168.0.121", port=9999) prcxi_api.list_matrices() prcxi_api.get_all_materials() diff --git a/unilabos/devices/neware_battery_test_system/__init__.py b/unilabos/devices/neware_battery_test_system/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/unilabos/devices/virtual/workbench.py b/unilabos/devices/virtual/workbench.py new file mode 100644 index 0000000..7a8e145 --- /dev/null +++ 
b/unilabos/devices/virtual/workbench.py @@ -0,0 +1,687 @@ +""" +Virtual Workbench Device - 模拟工作台设备 +包含: +- 1个机械臂 (每次操作3s, 独占锁) +- 3个加热台 (每次加热10s, 可并行) + +工作流程: +1. A1-A5 物料同时启动,竞争机械臂 +2. 机械臂将物料移动到空闲加热台 +3. 加热完成后,机械臂将物料移动到C1-C5 + +注意:调用来自线程池,使用 threading.Lock 进行同步 +""" +import logging +import time +from typing import Dict, Any, Optional +from dataclasses import dataclass +from enum import Enum +from threading import Lock, RLock + +from typing_extensions import TypedDict + +from unilabos.ros.nodes.base_device_node import BaseROS2DeviceNode +from unilabos.utils.decorator import not_action + + +# ============ TypedDict 返回类型定义 ============ + +class MoveToHeatingStationResult(TypedDict): + """move_to_heating_station 返回类型""" + success: bool + station_id: int + material_id: str + material_number: int + message: str + + +class StartHeatingResult(TypedDict): + """start_heating 返回类型""" + success: bool + station_id: int + material_id: str + material_number: int + message: str + + +class MoveToOutputResult(TypedDict): + """move_to_output 返回类型""" + success: bool + station_id: int + material_id: str + + +class PrepareMaterialsResult(TypedDict): + """prepare_materials 返回类型 - 批量准备物料""" + success: bool + count: int + material_1: int # 物料编号1 + material_2: int # 物料编号2 + material_3: int # 物料编号3 + material_4: int # 物料编号4 + material_5: int # 物料编号5 + message: str + + +# ============ 状态枚举 ============ + +class HeatingStationState(Enum): + """加热台状态枚举""" + IDLE = "idle" # 空闲 + OCCUPIED = "occupied" # 已放置物料,等待加热 + HEATING = "heating" # 加热中 + COMPLETED = "completed" # 加热完成,等待取走 + + +class ArmState(Enum): + """机械臂状态枚举""" + IDLE = "idle" # 空闲 + BUSY = "busy" # 工作中 + + +@dataclass +class HeatingStation: + """加热台数据结构""" + station_id: int + state: HeatingStationState = HeatingStationState.IDLE + current_material: Optional[str] = None # 当前物料 (如 "A1", "A2") + material_number: Optional[int] = None # 物料编号 (1-5) + heating_start_time: Optional[float] = None + heating_progress: float = 0.0 + + +class VirtualWorkbench: + """ + Virtual Workbench Device - 虚拟工作台设备 + + 模拟一个包含1个机械臂和3个加热台的工作站 + - 机械臂操作耗时3秒,同一时间只能执行一个操作 + - 加热台加热耗时10秒,3个加热台可并行工作 + + 工作流: + 1. 物料A1-A5并发启动(线程池),竞争机械臂使用权 + 2. 获取机械臂后,查找空闲加热台 + 3. 机械臂将物料放入加热台,开始加热 + 4. 
加热完成后,机械臂将物料移动到目标位置Cn + """ + + _ros_node: BaseROS2DeviceNode + + # 配置常量 + ARM_OPERATION_TIME: float = 3.0 # 机械臂操作时间(秒) + HEATING_TIME: float = 10.0 # 加热时间(秒) + NUM_HEATING_STATIONS: int = 3 # 加热台数量 + + def __init__(self, device_id: Optional[str] = None, config: Optional[Dict[str, Any]] = None, **kwargs): + # 处理可能的不同调用方式 + if device_id is None and "id" in kwargs: + device_id = kwargs.pop("id") + if config is None and "config" in kwargs: + config = kwargs.pop("config") + + self.device_id = device_id or "virtual_workbench" + self.config = config or {} + + self.logger = logging.getLogger(f"VirtualWorkbench.{self.device_id}") + self.data: Dict[str, Any] = {} + + # 从config中获取可配置参数 + self.ARM_OPERATION_TIME = float(self.config.get("arm_operation_time", 3.0)) + self.HEATING_TIME = float(self.config.get("heating_time", 10.0)) + self.NUM_HEATING_STATIONS = int(self.config.get("num_heating_stations", 3)) + + # 机械臂状态和锁 (使用threading.Lock) + self._arm_lock = Lock() + self._arm_state = ArmState.IDLE + self._arm_current_task: Optional[str] = None + + # 加热台状态 (station_id -> HeatingStation) - 立即初始化,不依赖initialize() + self._heating_stations: Dict[int, HeatingStation] = { + i: HeatingStation(station_id=i) + for i in range(1, self.NUM_HEATING_STATIONS + 1) + } + self._stations_lock = RLock() # 可重入锁,保护加热台状态 + + # 任务追踪 + self._active_tasks: Dict[str, Dict[str, Any]] = {} # material_id -> task_info + self._tasks_lock = Lock() + + # 处理其他kwargs参数 + skip_keys = {"arm_operation_time", "heating_time", "num_heating_stations"} + for key, value in kwargs.items(): + if key not in skip_keys and not hasattr(self, key): + setattr(self, key, value) + + self.logger.info(f"=== 虚拟工作台 {self.device_id} 已创建 ===") + self.logger.info( + f"机械臂操作时间: {self.ARM_OPERATION_TIME}s | " + f"加热时间: {self.HEATING_TIME}s | " + f"加热台数量: {self.NUM_HEATING_STATIONS}" + ) + + @not_action + def post_init(self, ros_node: BaseROS2DeviceNode): + """ROS节点初始化后回调""" + self._ros_node = ros_node + + @not_action + def initialize(self) -> bool: + """初始化虚拟工作台""" + self.logger.info(f"初始化虚拟工作台 {self.device_id}") + + # 重置加热台状态 (已在__init__中创建,这里重置为初始状态) + with self._stations_lock: + for station in self._heating_stations.values(): + station.state = HeatingStationState.IDLE + station.current_material = None + station.material_number = None + station.heating_progress = 0.0 + + # 初始化状态 + self.data.update({ + "status": "Ready", + "arm_state": ArmState.IDLE.value, + "arm_current_task": None, + "heating_stations": self._get_stations_status(), + "active_tasks_count": 0, + "message": "工作台就绪", + }) + + self.logger.info(f"工作台初始化完成: {self.NUM_HEATING_STATIONS}个加热台就绪") + return True + + @not_action + def cleanup(self) -> bool: + """清理虚拟工作台""" + self.logger.info(f"清理虚拟工作台 {self.device_id}") + + self._arm_state = ArmState.IDLE + self._arm_current_task = None + + with self._stations_lock: + self._heating_stations.clear() + + with self._tasks_lock: + self._active_tasks.clear() + + self.data.update({ + "status": "Offline", + "arm_state": ArmState.IDLE.value, + "heating_stations": {}, + "message": "工作台已关闭", + }) + return True + + def _get_stations_status(self) -> Dict[int, Dict[str, Any]]: + """获取所有加热台状态""" + with self._stations_lock: + return { + station_id: { + "state": station.state.value, + "current_material": station.current_material, + "material_number": station.material_number, + "heating_progress": station.heating_progress, + } + for station_id, station in self._heating_stations.items() + } + + def _update_data_status(self, message: Optional[str] = None): + """更新状态数据""" + 
self.data.update({ + "arm_state": self._arm_state.value, + "arm_current_task": self._arm_current_task, + "heating_stations": self._get_stations_status(), + "active_tasks_count": len(self._active_tasks), + }) + if message: + self.data["message"] = message + + def _find_available_heating_station(self) -> Optional[int]: + """查找空闲的加热台 + + Returns: + 空闲加热台ID,如果没有则返回None + """ + with self._stations_lock: + for station_id, station in self._heating_stations.items(): + if station.state == HeatingStationState.IDLE: + return station_id + return None + + def _acquire_arm(self, task_description: str) -> bool: + """获取机械臂使用权(阻塞直到获取) + + Args: + task_description: 任务描述,用于日志 + + Returns: + 是否成功获取 + """ + self.logger.info(f"[{task_description}] 等待获取机械臂...") + + # 阻塞等待获取锁 + self._arm_lock.acquire() + + self._arm_state = ArmState.BUSY + self._arm_current_task = task_description + self._update_data_status(f"机械臂执行: {task_description}") + + self.logger.info(f"[{task_description}] 成功获取机械臂使用权") + return True + + def _release_arm(self): + """释放机械臂""" + task = self._arm_current_task + self._arm_state = ArmState.IDLE + self._arm_current_task = None + self._arm_lock.release() + self._update_data_status(f"机械臂已释放 (完成: {task})") + self.logger.info(f"机械臂已释放 (完成: {task})") + + def prepare_materials( + self, + count: int = 5, + ) -> PrepareMaterialsResult: + """ + 批量准备物料 - 虚拟起始节点 + + 作为工作流的起始节点,生成指定数量的物料编号供后续节点使用。 + 输出5个handle (material_1 ~ material_5),分别对应实验1~5。 + + Args: + count: 待生成的物料数量,默认5 (生成 A1-A5) + + Returns: + PrepareMaterialsResult: 包含 material_1 ~ material_5 用于传递给 move_to_heating_station + """ + # 生成物料列表 A1 - A{count} + materials = [i for i in range(1, count + 1)] + + self.logger.info( + f"[准备物料] 生成 {count} 个物料: " + f"A1-A{count} -> material_1~material_{count}" + ) + + return { + "success": True, + "count": count, + "material_1": materials[0] if len(materials) > 0 else 0, + "material_2": materials[1] if len(materials) > 1 else 0, + "material_3": materials[2] if len(materials) > 2 else 0, + "material_4": materials[3] if len(materials) > 3 else 0, + "material_5": materials[4] if len(materials) > 4 else 0, + "message": f"已准备 {count} 个物料: A1-A{count}", + } + + def move_to_heating_station( + self, + material_number: int, + ) -> MoveToHeatingStationResult: + """ + 将物料从An位置移动到加热台 + + 多线程并发调用时,会竞争机械臂使用权,并自动查找空闲加热台 + + Args: + material_number: 物料编号 (1-5) + + Returns: + MoveToHeatingStationResult: 包含 station_id, material_number 等用于传递给下一个节点 + """ + # 根据物料编号生成物料ID + material_id = f"A{material_number}" + task_desc = f"移动{material_id}到加热台" + self.logger.info(f"[任务] {task_desc} - 开始执行") + + # 记录任务 + with self._tasks_lock: + self._active_tasks[material_id] = { + "status": "waiting_for_arm", + "start_time": time.time(), + } + + try: + # 步骤1: 等待获取机械臂使用权(竞争) + with self._tasks_lock: + self._active_tasks[material_id]["status"] = "waiting_for_arm" + self._acquire_arm(task_desc) + + # 步骤2: 查找空闲加热台 + with self._tasks_lock: + self._active_tasks[material_id]["status"] = "finding_station" + station_id = None + + # 循环等待直到找到空闲加热台 + while station_id is None: + station_id = self._find_available_heating_station() + if station_id is None: + self.logger.info(f"[{material_id}] 没有空闲加热台,等待中...") + # 释放机械臂,等待后重试 + self._release_arm() + time.sleep(0.5) + self._acquire_arm(task_desc) + + # 步骤3: 占用加热台 - 立即标记为OCCUPIED,防止其他任务选择同一加热台 + with self._stations_lock: + self._heating_stations[station_id].state = HeatingStationState.OCCUPIED + self._heating_stations[station_id].current_material = material_id + self._heating_stations[station_id].material_number = 
material_number + + # 步骤4: 模拟机械臂移动操作 (3秒) + with self._tasks_lock: + self._active_tasks[material_id]["status"] = "arm_moving" + self._active_tasks[material_id]["assigned_station"] = station_id + self.logger.info(f"[{material_id}] 机械臂正在移动到加热台{station_id}...") + + time.sleep(self.ARM_OPERATION_TIME) + + # 步骤5: 放入加热台完成 + self._update_data_status(f"{material_id}已放入加热台{station_id}") + self.logger.info(f"[{material_id}] 已放入加热台{station_id} (用时{self.ARM_OPERATION_TIME}s)") + + # 释放机械臂 + self._release_arm() + + with self._tasks_lock: + self._active_tasks[material_id]["status"] = "placed_on_station" + + return { + "success": True, + "station_id": station_id, + "material_id": material_id, + "material_number": material_number, + "message": f"{material_id}已成功移动到加热台{station_id}", + } + + except Exception as e: + self.logger.error(f"[{material_id}] 移动失败: {str(e)}") + if self._arm_lock.locked(): + self._release_arm() + return { + "success": False, + "station_id": -1, + "material_id": material_id, + "material_number": material_number, + "message": f"移动失败: {str(e)}", + } + + def start_heating( + self, + station_id: int, + material_number: int, + ) -> StartHeatingResult: + """ + 启动指定加热台的加热程序 + + Args: + station_id: 加热台ID (1-3),从 move_to_heating_station 的 handle 传入 + material_number: 物料编号,从 move_to_heating_station 的 handle 传入 + + Returns: + StartHeatingResult: 包含 station_id, material_number 等用于传递给下一个节点 + """ + self.logger.info(f"[加热台{station_id}] 开始加热") + + if station_id not in self._heating_stations: + return { + "success": False, + "station_id": station_id, + "material_id": "", + "material_number": material_number, + "message": f"无效的加热台ID: {station_id}", + } + + with self._stations_lock: + station = self._heating_stations[station_id] + + if station.current_material is None: + return { + "success": False, + "station_id": station_id, + "material_id": "", + "material_number": material_number, + "message": f"加热台{station_id}上没有物料", + } + + if station.state == HeatingStationState.HEATING: + return { + "success": False, + "station_id": station_id, + "material_id": station.current_material, + "material_number": material_number, + "message": f"加热台{station_id}已经在加热中", + } + + material_id = station.current_material + + # 开始加热 + station.state = HeatingStationState.HEATING + station.heating_start_time = time.time() + station.heating_progress = 0.0 + + with self._tasks_lock: + if material_id in self._active_tasks: + self._active_tasks[material_id]["status"] = "heating" + + self._update_data_status(f"加热台{station_id}开始加热{material_id}") + + # 模拟加热过程 (10秒) + start_time = time.time() + while True: + elapsed = time.time() - start_time + progress = min(100.0, (elapsed / self.HEATING_TIME) * 100) + + with self._stations_lock: + self._heating_stations[station_id].heating_progress = progress + + self._update_data_status(f"加热台{station_id}加热中: {progress:.1f}%") + + if elapsed >= self.HEATING_TIME: + break + + time.sleep(1.0) + + # 加热完成 + with self._stations_lock: + self._heating_stations[station_id].state = HeatingStationState.COMPLETED + self._heating_stations[station_id].heating_progress = 100.0 + + with self._tasks_lock: + if material_id in self._active_tasks: + self._active_tasks[material_id]["status"] = "heating_completed" + + self._update_data_status(f"加热台{station_id}加热完成") + self.logger.info(f"[加热台{station_id}] {material_id}加热完成 (用时{self.HEATING_TIME}s)") + + return { + "success": True, + "station_id": station_id, + "material_id": material_id, + "material_number": material_number, + "message": f"加热台{station_id}加热完成", + } + + 
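    # A minimal usage sketch (commented out; assumes a plain ThreadPoolExecutor standing
    # in for the ROS action thread pool, and an illustrative device_id "demo_workbench"):
    # five materials each run the three actions of this class in sequence and compete
    # for the single arm lock, as described in the module docstring.
    #
    #   from concurrent.futures import ThreadPoolExecutor
    #
    #   wb = VirtualWorkbench(device_id="demo_workbench")
    #   wb.initialize()
    #
    #   def run_one(n: int) -> str:
    #       placed = wb.move_to_heating_station(material_number=n)
    #       if not placed["success"]:
    #           return placed["message"]
    #       heated = wb.start_heating(placed["station_id"], placed["material_number"])
    #       done = wb.move_to_output(heated["station_id"], heated["material_number"])
    #       return done["message"]
    #
    #   with ThreadPoolExecutor(max_workers=5) as pool:
    #       for msg in pool.map(run_one, range(1, 6)):
    #           print(msg)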
def move_to_output( + self, + station_id: int, + material_number: int, + ) -> MoveToOutputResult: + """ + 将物料从加热台移动到输出位置Cn + + Args: + station_id: 加热台ID (1-3),从 start_heating 的 handle 传入 + material_number: 物料编号,从 start_heating 的 handle 传入,用于确定输出位置 Cn + + Returns: + MoveToOutputResult: 包含执行结果 + """ + output_number = material_number # 物料编号决定输出位置 + + if station_id not in self._heating_stations: + return { + "success": False, + "station_id": station_id, + "material_id": "", + "output_position": f"C{output_number}", + "message": f"无效的加热台ID: {station_id}", + } + + with self._stations_lock: + station = self._heating_stations[station_id] + material_id = station.current_material + + if material_id is None: + return { + "success": False, + "station_id": station_id, + "material_id": "", + "output_position": f"C{output_number}", + "message": f"加热台{station_id}上没有物料", + } + + if station.state != HeatingStationState.COMPLETED: + return { + "success": False, + "station_id": station_id, + "material_id": material_id, + "output_position": f"C{output_number}", + "message": f"加热台{station_id}尚未完成加热 (当前状态: {station.state.value})", + } + + output_position = f"C{output_number}" + task_desc = f"从加热台{station_id}移动{material_id}到{output_position}" + self.logger.info(f"[任务] {task_desc}") + + try: + with self._tasks_lock: + if material_id in self._active_tasks: + self._active_tasks[material_id]["status"] = "waiting_for_arm_output" + + # 获取机械臂 + self._acquire_arm(task_desc) + + with self._tasks_lock: + if material_id in self._active_tasks: + self._active_tasks[material_id]["status"] = "arm_moving_to_output" + + # 模拟机械臂操作 (3秒) + self.logger.info(f"[{material_id}] 机械臂正在从加热台{station_id}取出并移动到{output_position}...") + time.sleep(self.ARM_OPERATION_TIME) + + # 清空加热台 + with self._stations_lock: + self._heating_stations[station_id].state = HeatingStationState.IDLE + self._heating_stations[station_id].current_material = None + self._heating_stations[station_id].material_number = None + self._heating_stations[station_id].heating_progress = 0.0 + self._heating_stations[station_id].heating_start_time = None + + # 释放机械臂 + self._release_arm() + + # 任务完成 + with self._tasks_lock: + if material_id in self._active_tasks: + self._active_tasks[material_id]["status"] = "completed" + self._active_tasks[material_id]["end_time"] = time.time() + + self._update_data_status(f"{material_id}已移动到{output_position}") + self.logger.info(f"[{material_id}] 已成功移动到{output_position} (用时{self.ARM_OPERATION_TIME}s)") + + return { + "success": True, + "station_id": station_id, + "material_id": material_id, + "output_position": output_position, + "message": f"{material_id}已成功移动到{output_position}", + } + + except Exception as e: + self.logger.error(f"移动到输出位置失败: {str(e)}") + if self._arm_lock.locked(): + self._release_arm() + return { + "success": False, + "station_id": station_id, + "material_id": "", + "output_position": output_position, + "message": f"移动失败: {str(e)}", + } + + # ============ 状态属性 ============ + + @property + def status(self) -> str: + return self.data.get("status", "Unknown") + + @property + def arm_state(self) -> str: + return self._arm_state.value + + @property + def arm_current_task(self) -> str: + return self._arm_current_task or "" + + @property + def heating_station_1_state(self) -> str: + with self._stations_lock: + station = self._heating_stations.get(1) + return station.state.value if station else "unknown" + + @property + def heating_station_1_material(self) -> str: + with self._stations_lock: + station = self._heating_stations.get(1) 
+ return station.current_material or "" if station else "" + + @property + def heating_station_1_progress(self) -> float: + with self._stations_lock: + station = self._heating_stations.get(1) + return station.heating_progress if station else 0.0 + + @property + def heating_station_2_state(self) -> str: + with self._stations_lock: + station = self._heating_stations.get(2) + return station.state.value if station else "unknown" + + @property + def heating_station_2_material(self) -> str: + with self._stations_lock: + station = self._heating_stations.get(2) + return station.current_material or "" if station else "" + + @property + def heating_station_2_progress(self) -> float: + with self._stations_lock: + station = self._heating_stations.get(2) + return station.heating_progress if station else 0.0 + + @property + def heating_station_3_state(self) -> str: + with self._stations_lock: + station = self._heating_stations.get(3) + return station.state.value if station else "unknown" + + @property + def heating_station_3_material(self) -> str: + with self._stations_lock: + station = self._heating_stations.get(3) + return station.current_material or "" if station else "" + + @property + def heating_station_3_progress(self) -> float: + with self._stations_lock: + station = self._heating_stations.get(3) + return station.heating_progress if station else 0.0 + + @property + def active_tasks_count(self) -> int: + with self._tasks_lock: + return len(self._active_tasks) + + @property + def message(self) -> str: + return self.data.get("message", "") diff --git a/unilabos/devices/workstation/bioyond_studio/bioyond_cell/__init__.py b/unilabos/devices/workstation/bioyond_studio/bioyond_cell/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/unilabos/devices/workstation/bioyond_studio/dispensing_station/__init__.py b/unilabos/devices/workstation/bioyond_studio/dispensing_station/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/unilabos/devices/workstation/bioyond_studio/reaction_station/__init__.py b/unilabos/devices/workstation/bioyond_studio/reaction_station/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/unilabos/devices/xrd_d7mate/__init__.py b/unilabos/devices/xrd_d7mate/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/unilabos/devices/zhida_hplc/__init__.py b/unilabos/devices/zhida_hplc/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/unilabos/registry/devices/liquid_handler.yaml b/unilabos/registry/devices/liquid_handler.yaml index 298eb70..b2612e7 100644 --- a/unilabos/registry/devices/liquid_handler.yaml +++ b/unilabos/registry/devices/liquid_handler.yaml @@ -9284,7 +9284,13 @@ liquid_handler.prcxi: data_source: handle data_type: resource handler_key: input_wells - label: InputWells + label: 待设定液体孔 + output: + - data_key: wells.@flatten + data_source: executor + data_type: resource + handler_key: output_wells + label: 已设定液体孔 placeholder_keys: wells: unilabos_resources result: {} @@ -9400,6 +9406,163 @@ liquid_handler.prcxi: title: LiquidHandlerSetLiquid type: object type: LiquidHandlerSetLiquid + set_liquid_from_plate: + feedback: {} + goal: {} + goal_default: + liquid_names: null + plate: null + volumes: null + well_names: null + handles: + input: + - data_key: plate + data_source: handle + data_type: resource + handler_key: input_plate + label: 待设定液体板 + output: + - data_key: plate.@flatten + data_source: executor + data_type: resource + handler_key: output_plate + label: 已设定液体板 + - data_key: 
wells.@flatten + data_source: executor + data_type: resource + handler_key: output_wells + label: 已设定液体孔 + - data_key: volumes + data_source: executor + data_type: number_array + handler_key: output_volumes + label: 各孔设定体积 + placeholder_keys: + plate: unilabos_resources + result: {} + schema: + description: '' + properties: + feedback: {} + goal: + properties: + liquid_names: + items: + type: string + type: array + plate: + properties: + category: + type: string + children: + items: + type: string + type: array + config: + type: string + data: + type: string + id: + type: string + name: + type: string + parent: + type: string + pose: + properties: + orientation: + properties: + w: + type: number + x: + type: number + y: + type: number + z: + type: number + required: + - x + - y + - z + - w + title: orientation + type: object + position: + properties: + x: + type: number + y: + type: number + z: + type: number + required: + - x + - y + - z + title: position + type: object + required: + - position + - orientation + title: pose + type: object + sample_id: + type: string + type: + type: string + required: + - id + - name + - sample_id + - children + - parent + - type + - category + - pose + - config + - data + title: plate + type: object + volumes: + items: + type: number + type: array + well_names: + items: + type: string + type: array + required: + - plate + - well_names + - liquid_names + - volumes + type: object + result: + properties: + plate: + items: {} + title: Plate + type: array + volumes: + items: + type: number + title: Volumes + type: array + wells: + items: {} + title: Wells + type: array + required: + - plate + - wells + - volumes + title: SetLiquidFromPlateReturn + type: object + required: + - goal + title: set_liquid_from_plate参数 + type: object + type: UniLabJsonCommand set_tiprack: feedback: {} goal: @@ -9745,21 +9908,21 @@ liquid_handler.prcxi: - 0 handles: input: - - data_key: liquid + - data_key: sources data_source: handle data_type: resource - handler_key: sources - label: sources - - data_key: liquid - data_source: executor + handler_key: sources_identifier + label: 待移动液体 + - data_key: targets + data_source: handle data_type: resource - handler_key: targets - label: targets - - data_key: liquid - data_source: executor + handler_key: targets_identifier + label: 转移目标 + - data_key: tip_rack + data_source: handle data_type: resource - handler_key: tip_rack - label: tip_rack + handler_key: tip_rack_identifier + label: 墙头盒 output: - data_key: liquid data_source: handle diff --git a/unilabos/registry/devices/virtual_device.yaml b/unilabos/registry/devices/virtual_device.yaml index 77ac533..c38655c 100644 --- a/unilabos/registry/devices/virtual_device.yaml +++ b/unilabos/registry/devices/virtual_device.yaml @@ -5792,3 +5792,381 @@ virtual_vacuum_pump: - status type: object version: 1.0.0 +virtual_workbench: + category: + - virtual_device + class: + action_value_mappings: + auto-move_to_heating_station: + feedback: {} + goal: {} + goal_default: + material_number: null + handles: + input: + - data_key: material_number + data_source: handle + data_type: workbench_material + handler_key: material_input + label: 物料编号 + output: + - data_key: station_id + data_source: executor + data_type: workbench_station + handler_key: heating_station_output + label: 加热台ID + - data_key: material_number + data_source: executor + data_type: workbench_material + handler_key: material_number_output + label: 物料编号 + placeholder_keys: {} + result: {} + schema: + description: 将物料从An位置移动到空闲加热台,返回分配的加热台ID + 
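+          # NOTE: the heating_station_output / material_number_output handles above are
+          # meant to be wired into start_heating (station_id_input / material_number_input),
+          # whose outputs in turn feed move_to_output, mirroring the Python return types.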
properties: + feedback: {} + goal: + properties: + material_number: + description: 物料编号,1-5,物料ID自动生成为A{n} + type: integer + required: + - material_number + type: object + result: + description: move_to_heating_station 返回类型 + properties: + material_id: + title: Material Id + type: string + material_number: + title: Material Number + type: integer + message: + title: Message + type: string + station_id: + description: 分配的加热台ID + title: Station Id + type: integer + success: + title: Success + type: boolean + required: + - success + - station_id + - material_id + - material_number + - message + title: MoveToHeatingStationResult + type: object + required: + - goal + title: move_to_heating_station参数 + type: object + type: UniLabJsonCommand + auto-move_to_output: + feedback: {} + goal: {} + goal_default: + material_number: null + station_id: null + handles: + input: + - data_key: station_id + data_source: handle + data_type: workbench_station + handler_key: output_station_input + label: 加热台ID + - data_key: material_number + data_source: handle + data_type: workbench_material + handler_key: output_material_input + label: 物料编号 + placeholder_keys: {} + result: {} + schema: + description: 将物料从加热台移动到输出位置Cn + properties: + feedback: {} + goal: + properties: + material_number: + description: 物料编号,用于确定输出位置Cn + type: integer + station_id: + description: 加热台ID,1-3,从上一节点传入 + type: integer + required: + - station_id + - material_number + type: object + result: + description: move_to_output 返回类型 + properties: + material_id: + title: Material Id + type: string + station_id: + title: Station Id + type: integer + success: + title: Success + type: boolean + required: + - success + - station_id + - material_id + title: MoveToOutputResult + type: object + required: + - goal + title: move_to_output参数 + type: object + type: UniLabJsonCommand + auto-prepare_materials: + feedback: {} + goal: {} + goal_default: + count: 5 + handles: + output: + - data_key: material_1 + data_source: executor + data_type: workbench_material + handler_key: channel_1 + label: 实验1 + - data_key: material_2 + data_source: executor + data_type: workbench_material + handler_key: channel_2 + label: 实验2 + - data_key: material_3 + data_source: executor + data_type: workbench_material + handler_key: channel_3 + label: 实验3 + - data_key: material_4 + data_source: executor + data_type: workbench_material + handler_key: channel_4 + label: 实验4 + - data_key: material_5 + data_source: executor + data_type: workbench_material + handler_key: channel_5 + label: 实验5 + placeholder_keys: {} + result: {} + schema: + description: 批量准备物料 - 虚拟起始节点,生成A1-A5物料,输出5个handle供后续节点使用 + properties: + feedback: {} + goal: + properties: + count: + default: 5 + description: 待生成的物料数量,默认5 (生成 A1-A5) + type: integer + required: [] + type: object + result: + description: prepare_materials 返回类型 - 批量准备物料 + properties: + count: + title: Count + type: integer + material_1: + title: Material 1 + type: integer + material_2: + title: Material 2 + type: integer + material_3: + title: Material 3 + type: integer + material_4: + title: Material 4 + type: integer + material_5: + title: Material 5 + type: integer + message: + title: Message + type: string + success: + title: Success + type: boolean + required: + - success + - count + - material_1 + - material_2 + - material_3 + - material_4 + - material_5 + - message + title: PrepareMaterialsResult + type: object + required: + - goal + title: prepare_materials参数 + type: object + type: UniLabJsonCommand + auto-start_heating: + feedback: {} + 
goal: {} + goal_default: + material_number: null + station_id: null + handles: + input: + - data_key: station_id + data_source: handle + data_type: workbench_station + handler_key: station_id_input + label: 加热台ID + - data_key: material_number + data_source: handle + data_type: workbench_material + handler_key: material_number_input + label: 物料编号 + output: + - data_key: station_id + data_source: executor + data_type: workbench_station + handler_key: heating_done_station + label: 加热完成-加热台ID + - data_key: material_number + data_source: executor + data_type: workbench_material + handler_key: heating_done_material + label: 加热完成-物料编号 + placeholder_keys: {} + result: {} + schema: + description: 启动指定加热台的加热程序 + properties: + feedback: {} + goal: + properties: + material_number: + description: 物料编号,从上一节点传入 + type: integer + station_id: + description: 加热台ID,1-3,从上一节点传入 + type: integer + required: + - station_id + - material_number + type: object + result: + description: start_heating 返回类型 + properties: + material_id: + title: Material Id + type: string + material_number: + title: Material Number + type: integer + message: + title: Message + type: string + station_id: + title: Station Id + type: integer + success: + title: Success + type: boolean + required: + - success + - station_id + - material_id + - material_number + - message + title: StartHeatingResult + type: object + required: + - goal + title: start_heating参数 + type: object + type: UniLabJsonCommand + module: unilabos.devices.virtual.workbench:VirtualWorkbench + status_types: + active_tasks_count: int + arm_current_task: str + arm_state: str + heating_station_1_material: str + heating_station_1_progress: float + heating_station_1_state: str + heating_station_2_material: str + heating_station_2_progress: float + heating_station_2_state: str + heating_station_3_material: str + heating_station_3_progress: float + heating_station_3_state: str + message: str + status: str + type: python + config_info: [] + description: Virtual Workbench with 1 robotic arm and 3 heating stations for concurrent + material processing + handles: [] + icon: '' + init_param_schema: + config: + properties: + config: + type: string + device_id: + type: string + required: [] + type: object + data: + properties: + active_tasks_count: + type: integer + arm_current_task: + type: string + arm_state: + type: string + heating_station_1_material: + type: string + heating_station_1_progress: + type: number + heating_station_1_state: + type: string + heating_station_2_material: + type: string + heating_station_2_progress: + type: number + heating_station_2_state: + type: string + heating_station_3_material: + type: string + heating_station_3_progress: + type: number + heating_station_3_state: + type: string + message: + type: string + status: + type: string + required: + - status + - arm_state + - arm_current_task + - heating_station_1_state + - heating_station_1_material + - heating_station_1_progress + - heating_station_2_state + - heating_station_2_material + - heating_station_2_progress + - heating_station_3_state + - heating_station_3_material + - heating_station_3_progress + - active_tasks_count + - message + type: object + version: 1.0.0 diff --git a/unilabos/registry/registry.py b/unilabos/registry/registry.py index f09b79c..ef111e6 100644 --- a/unilabos/registry/registry.py +++ b/unilabos/registry/registry.py @@ -4,6 +4,8 @@ import os import sys import inspect import importlib +import threading +from concurrent.futures import ThreadPoolExecutor, as_completed from pathlib 
import Path from typing import Any, Dict, List, Union, Tuple @@ -60,6 +62,7 @@ class Registry: self.device_module_to_registry = {} self.resource_type_registry = {} self._setup_called = False # 跟踪setup是否已调用 + self._registry_lock = threading.Lock() # 多线程加载时的锁 # 其他状态变量 # self.is_host_mode = False # 移至BasicConfig中 @@ -71,6 +74,20 @@ class Registry: from unilabos.app.web.utils.action_utils import get_yaml_from_goal_type + # 获取 HostNode 类的增强信息,用于自动生成 action schema + host_node_enhanced_info = get_enhanced_class_info( + "unilabos.ros.nodes.presets.host_node:HostNode", use_dynamic=True + ) + + # 为 test_latency 生成 schema,保留原有 description + test_latency_method_info = host_node_enhanced_info.get("action_methods", {}).get("test_latency", {}) + test_latency_schema = self._generate_unilab_json_command_schema( + test_latency_method_info.get("args", []), + "test_latency", + test_latency_method_info.get("return_annotation"), + ) + test_latency_schema["description"] = "用于测试延迟的动作,返回延迟时间和时间差。" + self.device_type_registry.update( { "host_node": { @@ -149,17 +166,22 @@ class Registry: "res_id": "unilabos_resources", # 将当前实验室的全部物料id作为下拉框可选择 "device_id": "unilabos_devices", # 将当前实验室的全部设备id作为下拉框可选择 "parent": "unilabos_nodes", # 将当前实验室的设备/物料作为下拉框可选择 + "class_name": "unilabos_class", }, }, "test_latency": { - "type": self.EmptyIn, + "type": ( + "UniLabJsonCommandAsync" + if test_latency_method_info.get("is_async", False) + else "UniLabJsonCommand" + ), "goal": {}, "feedback": {}, "result": {}, - "schema": ros_action_to_json_schema( - self.EmptyIn, "用于测试延迟的动作,返回延迟时间和时间差。" - ), - "goal_default": {}, + "schema": test_latency_schema, + "goal_default": { + arg["name"]: arg["default"] for arg in test_latency_method_info.get("args", []) + }, "handles": {}, }, "auto-test_resource": { @@ -242,67 +264,115 @@ class Registry: # 标记setup已被调用 self._setup_called = True + def _load_single_resource_file( + self, file: Path, complete_registry: bool, upload_registry: bool + ) -> Tuple[Dict[str, Any], Dict[str, Any], bool]: + """ + 加载单个资源文件 (线程安全) + + Returns: + (data, complete_data, is_valid): 资源数据, 完整数据, 是否有效 + """ + try: + with open(file, encoding="utf-8", mode="r") as f: + data = yaml.safe_load(io.StringIO(f.read())) + except Exception as e: + logger.warning(f"[UniLab Registry] 读取资源文件失败: {file}, 错误: {e}") + return {}, {}, False + + if not data: + return {}, {}, False + + complete_data = {} + for resource_id, resource_info in data.items(): + if "version" not in resource_info: + resource_info["version"] = "1.0.0" + if "category" not in resource_info: + resource_info["category"] = [file.stem] + elif file.stem not in resource_info["category"]: + resource_info["category"].append(file.stem) + elif not isinstance(resource_info.get("category"), list): + resource_info["category"] = [resource_info["category"]] + if "config_info" not in resource_info: + resource_info["config_info"] = [] + if "icon" not in resource_info: + resource_info["icon"] = "" + if "handles" not in resource_info: + resource_info["handles"] = [] + if "init_param_schema" not in resource_info: + resource_info["init_param_schema"] = {} + if "config_info" in resource_info: + del resource_info["config_info"] + if "file_path" in resource_info: + del resource_info["file_path"] + complete_data[resource_id] = copy.deepcopy(dict(sorted(resource_info.items()))) + if upload_registry: + class_info = resource_info.get("class", {}) + if len(class_info) and "module" in class_info: + if class_info.get("type") == "pylabrobot": + res_class = get_class(class_info["module"]) + if 
callable(res_class) and not isinstance(res_class, type): + res_instance = res_class(res_class.__name__) + res_ulr = tree_to_list([resource_plr_to_ulab(res_instance)]) + resource_info["config_info"] = res_ulr + resource_info["registry_type"] = "resource" + resource_info["file_path"] = str(file.absolute()).replace("\\", "/") + + complete_data = dict(sorted(complete_data.items())) + complete_data = copy.deepcopy(complete_data) + + if complete_registry: + try: + with open(file, "w", encoding="utf-8") as f: + yaml.dump(complete_data, f, allow_unicode=True, default_flow_style=False, Dumper=NoAliasDumper) + except Exception as e: + logger.warning(f"[UniLab Registry] 写入资源文件失败: {file}, 错误: {e}") + + return data, complete_data, True + def load_resource_types(self, path: os.PathLike, complete_registry: bool, upload_registry: bool): abs_path = Path(path).absolute() resource_path = abs_path / "resources" files = list(resource_path.glob("*/*.yaml")) - logger.trace(f"[UniLab Registry] load resources? {resource_path.exists()}, total: {len(files)}") - current_resource_number = len(self.resource_type_registry) + 1 - for i, file in enumerate(files): - with open(file, encoding="utf-8", mode="r") as f: - data = yaml.safe_load(io.StringIO(f.read())) - complete_data = {} - if data: - # 为每个资源添加文件路径信息 - for resource_id, resource_info in data.items(): - if "version" not in resource_info: - resource_info["version"] = "1.0.0" - if "category" not in resource_info: - resource_info["category"] = [file.stem] - elif file.stem not in resource_info["category"]: - resource_info["category"].append(file.stem) - elif not isinstance(resource_info.get("category"), list): - resource_info["category"] = [resource_info["category"]] - if "config_info" not in resource_info: - resource_info["config_info"] = [] - if "icon" not in resource_info: - resource_info["icon"] = "" - if "handles" not in resource_info: - resource_info["handles"] = [] - if "init_param_schema" not in resource_info: - resource_info["init_param_schema"] = {} - if "config_info" in resource_info: - del resource_info["config_info"] - if "file_path" in resource_info: - del resource_info["file_path"] - complete_data[resource_id] = copy.deepcopy(dict(sorted(resource_info.items()))) - if upload_registry: - class_info = resource_info.get("class", {}) - if len(class_info) and "module" in class_info: - if class_info.get("type") == "pylabrobot": - res_class = get_class(class_info["module"]) - if callable(res_class) and not isinstance( - res_class, type - ): # 有的是类,有的是函数,这里暂时只登记函数类的 - res_instance = res_class(res_class.__name__) - res_ulr = tree_to_list([resource_plr_to_ulab(res_instance)]) - resource_info["config_info"] = res_ulr - resource_info["registry_type"] = "resource" - resource_info["file_path"] = str(file.absolute()).replace("\\", "/") - complete_data = dict(sorted(complete_data.items())) - complete_data = copy.deepcopy(complete_data) - if complete_registry: - with open(file, "w", encoding="utf-8") as f: - yaml.dump(complete_data, f, allow_unicode=True, default_flow_style=False, Dumper=NoAliasDumper) + logger.debug(f"[UniLab Registry] resources: {resource_path.exists()}, total: {len(files)}") + if not files: + return + + # 使用线程池并行加载 + max_workers = min(8, len(files)) + results = [] + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + future_to_file = { + executor.submit(self._load_single_resource_file, file, complete_registry, upload_registry): file + for file in files + } + for future in as_completed(future_to_file): + file = future_to_file[future] + 
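+                # future.result() yields (data, complete_data, is_valid); per-file
+                # exceptions are caught below so a single bad YAML cannot abort the
+                # whole parallel resource load.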
try: + data, complete_data, is_valid = future.result() + if is_valid: + results.append((file, data)) + except Exception as e: + logger.warning(f"[UniLab Registry] 处理资源文件异常: {file}, 错误: {e}") + + # 线程安全地更新注册表 + current_resource_number = len(self.resource_type_registry) + 1 + with self._registry_lock: + for i, (file, data) in enumerate(results): self.resource_type_registry.update(data) - logger.trace( # type: ignore - f"[UniLab Registry] Resource-{current_resource_number} File-{i+1}/{len(files)} " + logger.trace( + f"[UniLab Registry] Resource-{current_resource_number} File-{i+1}/{len(results)} " + f"Add {list(data.keys())}" ) current_resource_number += 1 - else: - logger.debug(f"[UniLab Registry] Res File-{i+1}/{len(files)} Not Valid YAML File: {file.absolute()}") + + # 记录无效文件 + valid_files = {r[0] for r in results} + for file in files: + if file not in valid_files: + logger.debug(f"[UniLab Registry] Res File Not Valid YAML File: {file.absolute()}") def _extract_class_docstrings(self, module_string: str) -> Dict[str, str]: """ @@ -540,11 +610,9 @@ class Registry: return final_schema - def _preserve_field_descriptions( - self, new_schema: Dict[str, Any], previous_schema: Dict[str, Any] - ) -> None: + def _preserve_field_descriptions(self, new_schema: Dict[str, Any], previous_schema: Dict[str, Any]) -> None: """ - 保留之前 schema 中 goal/feedback/result 下一级字段的 description + 保留之前 schema 中 goal/feedback/result 下一级字段的 description 和 title Args: new_schema: 新生成的 schema(会被修改) @@ -566,6 +634,9 @@ class Registry: # 保留字段的 description if "description" in prev_field and prev_field["description"]: field_schema["description"] = prev_field["description"] + # 保留字段的 title(用户自定义的中文名) + if "title" in prev_field and prev_field["title"]: + field_schema["title"] = prev_field["title"] def _is_typed_dict(self, annotation: Any) -> bool: """ @@ -653,213 +724,244 @@ class Registry: "handles": {}, } + def _load_single_device_file( + self, file: Path, complete_registry: bool, get_yaml_from_goal_type + ) -> Tuple[Dict[str, Any], Dict[str, Any], bool, List[str]]: + """ + 加载单个设备文件 (线程安全) + + Returns: + (data, complete_data, is_valid, device_ids): 设备数据, 完整数据, 是否有效, 设备ID列表 + """ + try: + with open(file, encoding="utf-8", mode="r") as f: + data = yaml.safe_load(io.StringIO(f.read())) + except Exception as e: + logger.warning(f"[UniLab Registry] 读取设备文件失败: {file}, 错误: {e}") + return {}, {}, False, [] + + if not data: + return {}, {}, False, [] + + complete_data = {} + action_str_type_mapping = { + "UniLabJsonCommand": "UniLabJsonCommand", + "UniLabJsonCommandAsync": "UniLabJsonCommandAsync", + } + status_str_type_mapping = {} + device_ids = [] + + for device_id, device_config in data.items(): + if "version" not in device_config: + device_config["version"] = "1.0.0" + if "category" not in device_config: + device_config["category"] = [file.stem] + elif file.stem not in device_config["category"]: + device_config["category"].append(file.stem) + if "config_info" not in device_config: + device_config["config_info"] = [] + if "description" not in device_config: + device_config["description"] = "" + if "icon" not in device_config: + device_config["icon"] = "" + if "handles" not in device_config: + device_config["handles"] = [] + if "init_param_schema" not in device_config: + device_config["init_param_schema"] = {} + if "class" in device_config: + if "status_types" not in device_config["class"] or device_config["class"]["status_types"] is None: + device_config["class"]["status_types"] = {} + if ( + "action_value_mappings" not in 
device_config["class"] + or device_config["class"]["action_value_mappings"] is None + ): + device_config["class"]["action_value_mappings"] = {} + enhanced_info = {} + if complete_registry: + device_config["class"]["status_types"].clear() + enhanced_info = get_enhanced_class_info(device_config["class"]["module"], use_dynamic=True) + if not enhanced_info.get("dynamic_import_success", False): + continue + device_config["class"]["status_types"].update( + {k: v["return_type"] for k, v in enhanced_info["status_methods"].items()} + ) + for status_name, status_type in device_config["class"]["status_types"].items(): + if isinstance(status_type, tuple) or status_type in ["Any", "None", "Unknown"]: + status_type = "String" + device_config["class"]["status_types"][status_name] = status_type + try: + target_type = self._replace_type_with_class(status_type, device_id, f"状态 {status_name}") + except ROSMsgNotFound: + continue + if target_type in [dict, list]: + target_type = String + status_str_type_mapping[status_type] = target_type + device_config["class"]["status_types"] = dict(sorted(device_config["class"]["status_types"].items())) + if complete_registry: + old_action_configs = {} + for action_name, action_config in device_config["class"]["action_value_mappings"].items(): + old_action_configs[action_name] = action_config + + device_config["class"]["action_value_mappings"] = { + k: v + for k, v in device_config["class"]["action_value_mappings"].items() + if not k.startswith("auto-") + } + device_config["class"]["action_value_mappings"].update( + { + f"auto-{k}": { + "type": "UniLabJsonCommandAsync" if v["is_async"] else "UniLabJsonCommand", + "goal": {}, + "feedback": {}, + "result": {}, + "schema": self._generate_unilab_json_command_schema( + v["args"], + k, + v.get("return_annotation"), + old_action_configs.get(f"auto-{k}", {}).get("schema"), + ), + "goal_default": {i["name"]: i["default"] for i in v["args"]}, + "handles": old_action_configs.get(f"auto-{k}", {}).get("handles", []), + "placeholder_keys": { + i["name"]: ( + "unilabos_resources" + if i["type"] == "unilabos.registry.placeholder_type:ResourceSlot" + or i["type"] == ("list", "unilabos.registry.placeholder_type:ResourceSlot") + else "unilabos_devices" + ) + for i in v["args"] + if i.get("type", "") + in [ + "unilabos.registry.placeholder_type:ResourceSlot", + "unilabos.registry.placeholder_type:DeviceSlot", + ("list", "unilabos.registry.placeholder_type:ResourceSlot"), + ("list", "unilabos.registry.placeholder_type:DeviceSlot"), + ] + }, + } + for k, v in enhanced_info["action_methods"].items() + if k not in device_config["class"]["action_value_mappings"] + } + ) + for action_name, old_config in old_action_configs.items(): + if action_name in device_config["class"]["action_value_mappings"]: + old_schema = old_config.get("schema", {}) + if "description" in old_schema and old_schema["description"]: + device_config["class"]["action_value_mappings"][action_name]["schema"][ + "description" + ] = old_schema["description"] + device_config["init_param_schema"] = {} + device_config["init_param_schema"]["config"] = self._generate_unilab_json_command_schema( + enhanced_info["init_params"], "__init__" + )["properties"]["goal"] + device_config["init_param_schema"]["data"] = self._generate_status_types_schema( + enhanced_info["status_methods"] + ) + + device_config.pop("schema", None) + device_config["class"]["action_value_mappings"] = dict( + sorted(device_config["class"]["action_value_mappings"].items()) + ) + for action_name, action_config in 
device_config["class"]["action_value_mappings"].items(): + if "handles" not in action_config: + action_config["handles"] = {} + elif isinstance(action_config["handles"], list): + if len(action_config["handles"]): + logger.error(f"设备{device_id} {action_name} 的handles配置错误,应该是字典类型") + continue + else: + action_config["handles"] = {} + if "type" in action_config: + action_type_str: str = action_config["type"] + if not action_type_str.startswith("UniLabJsonCommand"): + try: + target_type = self._replace_type_with_class( + action_type_str, device_id, f"动作 {action_name}" + ) + except ROSMsgNotFound: + continue + action_str_type_mapping[action_type_str] = target_type + if target_type is not None: + action_config["goal_default"] = yaml.safe_load( + io.StringIO(get_yaml_from_goal_type(target_type.Goal)) + ) + action_config["schema"] = ros_action_to_json_schema(target_type) + else: + logger.warning( + f"[UniLab Registry] 设备 {device_id} 的动作 {action_name} 类型为空,跳过替换" + ) + complete_data[device_id] = copy.deepcopy(dict(sorted(device_config.items()))) + for status_name, status_type in device_config["class"]["status_types"].items(): + device_config["class"]["status_types"][status_name] = status_str_type_mapping[status_type] + for action_name, action_config in device_config["class"]["action_value_mappings"].items(): + if action_config["type"] not in action_str_type_mapping: + continue + action_config["type"] = action_str_type_mapping[action_config["type"]] + self._add_builtin_actions(device_config, device_id) + device_config["file_path"] = str(file.absolute()).replace("\\", "/") + device_config["registry_type"] = "device" + device_ids.append(device_id) + + complete_data = dict(sorted(complete_data.items())) + complete_data = copy.deepcopy(complete_data) + try: + with open(file, "w", encoding="utf-8") as f: + yaml.dump(complete_data, f, allow_unicode=True, default_flow_style=False, Dumper=NoAliasDumper) + except Exception as e: + logger.warning(f"[UniLab Registry] 写入设备文件失败: {file}, 错误: {e}") + + return data, complete_data, True, device_ids + def load_device_types(self, path: os.PathLike, complete_registry: bool): - # return abs_path = Path(path).absolute() devices_path = abs_path / "devices" device_comms_path = abs_path / "device_comms" files = list(devices_path.glob("*.yaml")) + list(device_comms_path.glob("*.yaml")) - logger.trace( # type: ignore + logger.trace( f"[UniLab Registry] devices: {devices_path.exists()}, device_comms: {device_comms_path.exists()}, " + f"total: {len(files)}" ) - current_device_number = len(self.device_type_registry) + 1 + + if not files: + return + from unilabos.app.web.utils.action_utils import get_yaml_from_goal_type - for i, file in enumerate(files): - with open(file, encoding="utf-8", mode="r") as f: - data = yaml.safe_load(io.StringIO(f.read())) - complete_data = {} - action_str_type_mapping = { - "UniLabJsonCommand": "UniLabJsonCommand", - "UniLabJsonCommandAsync": "UniLabJsonCommandAsync", + # 使用线程池并行加载 + max_workers = min(8, len(files)) + results = [] + + with ThreadPoolExecutor(max_workers=max_workers) as executor: + future_to_file = { + executor.submit(self._load_single_device_file, file, complete_registry, get_yaml_from_goal_type): file + for file in files } - status_str_type_mapping = {} - if data: - # 在添加到注册表前处理类型替换 - for device_id, device_config in data.items(): - # 添加文件路径信息 - 使用规范化的完整文件路径 - if "version" not in device_config: - device_config["version"] = "1.0.0" - if "category" not in device_config: - device_config["category"] = [file.stem] - elif file.stem not in 
device_config["category"]: - device_config["category"].append(file.stem) - if "config_info" not in device_config: - device_config["config_info"] = [] - if "description" not in device_config: - device_config["description"] = "" - if "icon" not in device_config: - device_config["icon"] = "" - if "handles" not in device_config: - device_config["handles"] = [] - if "init_param_schema" not in device_config: - device_config["init_param_schema"] = {} - if "class" in device_config: - if ( - "status_types" not in device_config["class"] - or device_config["class"]["status_types"] is None - ): - device_config["class"]["status_types"] = {} - if ( - "action_value_mappings" not in device_config["class"] - or device_config["class"]["action_value_mappings"] is None - ): - device_config["class"]["action_value_mappings"] = {} - enhanced_info = {} - if complete_registry: - device_config["class"]["status_types"].clear() - enhanced_info = get_enhanced_class_info(device_config["class"]["module"], use_dynamic=True) - if not enhanced_info.get("dynamic_import_success", False): - continue - device_config["class"]["status_types"].update( - {k: v["return_type"] for k, v in enhanced_info["status_methods"].items()} - ) - for status_name, status_type in device_config["class"]["status_types"].items(): - if isinstance(status_type, tuple) or status_type in ["Any", "None", "Unknown"]: - status_type = "String" # 替换成ROS的String,便于显示 - device_config["class"]["status_types"][status_name] = status_type - try: - target_type = self._replace_type_with_class( - status_type, device_id, f"状态 {status_name}" - ) - except ROSMsgNotFound: - continue - if target_type in [ - dict, - list, - ]: # 对于嵌套类型返回的对象,暂时处理成字符串,无法直接进行转换 - target_type = String - status_str_type_mapping[status_type] = target_type - device_config["class"]["status_types"] = dict( - sorted(device_config["class"]["status_types"].items()) - ) - if complete_registry: - # 保存原有的 action 配置(用于保留 schema 的 description 和 handles 等) - old_action_configs = {} - for action_name, action_config in device_config["class"]["action_value_mappings"].items(): - old_action_configs[action_name] = action_config + for future in as_completed(future_to_file): + file = future_to_file[future] + try: + data, complete_data, is_valid, device_ids = future.result() + if is_valid: + results.append((file, data, device_ids)) + except Exception as e: + logger.warning(f"[UniLab Registry] 处理设备文件异常: {file}, 错误: {e}") - device_config["class"]["action_value_mappings"] = { - k: v - for k, v in device_config["class"]["action_value_mappings"].items() - if not k.startswith("auto-") - } - # 处理动作值映射 - device_config["class"]["action_value_mappings"].update( - { - f"auto-{k}": { - "type": "UniLabJsonCommandAsync" if v["is_async"] else "UniLabJsonCommand", - "goal": {}, - "feedback": {}, - "result": {}, - "schema": self._generate_unilab_json_command_schema( - v["args"], - k, - v.get("return_annotation"), - # 传入旧的 schema 以保留字段 description - old_action_configs.get(f"auto-{k}", {}).get("schema"), - ), - "goal_default": {i["name"]: i["default"] for i in v["args"]}, - # 保留原有的 handles 配置 - "handles": old_action_configs.get(f"auto-{k}", {}).get("handles", []), - "placeholder_keys": { - i["name"]: ( - "unilabos_resources" - if i["type"] == "unilabos.registry.placeholder_type:ResourceSlot" - or i["type"] - == ("list", "unilabos.registry.placeholder_type:ResourceSlot") - else "unilabos_devices" - ) - for i in v["args"] - if i.get("type", "") - in [ - "unilabos.registry.placeholder_type:ResourceSlot", - 
"unilabos.registry.placeholder_type:DeviceSlot", - ("list", "unilabos.registry.placeholder_type:ResourceSlot"), - ("list", "unilabos.registry.placeholder_type:DeviceSlot"), - ] - }, - } - # 不生成已配置action的动作 - for k, v in enhanced_info["action_methods"].items() - if k not in device_config["class"]["action_value_mappings"] - } - ) - # 恢复原有的 description 信息(非 auto- 开头的动作) - for action_name, old_config in old_action_configs.items(): - if action_name in device_config["class"]["action_value_mappings"]: # 有一些会被删除 - old_schema = old_config.get("schema", {}) - if "description" in old_schema and old_schema["description"]: - device_config["class"]["action_value_mappings"][action_name]["schema"][ - "description" - ] = old_schema["description"] - device_config["init_param_schema"] = {} - device_config["init_param_schema"]["config"] = self._generate_unilab_json_command_schema( - enhanced_info["init_params"], "__init__" - )["properties"]["goal"] - device_config["init_param_schema"]["data"] = self._generate_status_types_schema( - enhanced_info["status_methods"] - ) - - device_config.pop("schema", None) - device_config["class"]["action_value_mappings"] = dict( - sorted(device_config["class"]["action_value_mappings"].items()) - ) - for action_name, action_config in device_config["class"]["action_value_mappings"].items(): - if "handles" not in action_config: - action_config["handles"] = {} - elif isinstance(action_config["handles"], list): - if len(action_config["handles"]): - logger.error(f"设备{device_id} {action_name} 的handles配置错误,应该是字典类型") - continue - else: - action_config["handles"] = {} - if "type" in action_config: - action_type_str: str = action_config["type"] - # 通过Json发放指令,而不是通过特殊的ros action进行处理 - if not action_type_str.startswith("UniLabJsonCommand"): - try: - target_type = self._replace_type_with_class( - action_type_str, device_id, f"动作 {action_name}" - ) - except ROSMsgNotFound: - continue - action_str_type_mapping[action_type_str] = target_type - if target_type is not None: - action_config["goal_default"] = yaml.safe_load( - io.StringIO(get_yaml_from_goal_type(target_type.Goal)) - ) - action_config["schema"] = ros_action_to_json_schema(target_type) - else: - logger.warning( - f"[UniLab Registry] 设备 {device_id} 的动作 {action_name} 类型为空,跳过替换" - ) - complete_data[device_id] = copy.deepcopy(dict(sorted(device_config.items()))) # 稍后dump到文件 - for status_name, status_type in device_config["class"]["status_types"].items(): - device_config["class"]["status_types"][status_name] = status_str_type_mapping[status_type] - for action_name, action_config in device_config["class"]["action_value_mappings"].items(): - if action_config["type"] not in action_str_type_mapping: - continue - action_config["type"] = action_str_type_mapping[action_config["type"]] - # 添加内置的驱动命令动作 - self._add_builtin_actions(device_config, device_id) - device_config["file_path"] = str(file.absolute()).replace("\\", "/") - device_config["registry_type"] = "device" - logger.trace( # type: ignore - f"[UniLab Registry] Device-{current_device_number} File-{i+1}/{len(files)} Add {device_id} " + # 线程安全地更新注册表 + current_device_number = len(self.device_type_registry) + 1 + with self._registry_lock: + for file, data, device_ids in results: + self.device_type_registry.update(data) + for device_id in device_ids: + logger.trace( + f"[UniLab Registry] Device-{current_device_number} Add {device_id} " + f"[{data[device_id].get('name', '未命名设备')}]" ) current_device_number += 1 - complete_data = dict(sorted(complete_data.items())) - complete_data = 
copy.deepcopy(complete_data) - with open(file, "w", encoding="utf-8") as f: - yaml.dump(complete_data, f, allow_unicode=True, default_flow_style=False, Dumper=NoAliasDumper) - self.device_type_registry.update(data) - else: - logger.debug( - f"[UniLab Registry] Device File-{i+1}/{len(files)} Not Valid YAML File: {file.absolute()}" - ) + + # 记录无效文件 + valid_files = {r[0] for r in results} + for file in files: + if file not in valid_files: + logger.debug(f"[UniLab Registry] Device File Not Valid YAML File: {file.absolute()}") def obtain_registry_device_info(self): devices = [] diff --git a/unilabos/resources/graphio.py b/unilabos/resources/graphio.py index 1c514d4..e1f3a0b 100644 --- a/unilabos/resources/graphio.py +++ b/unilabos/resources/graphio.py @@ -260,7 +260,7 @@ def read_node_link_json( resource_tree_set = canonicalize_nodes_data(nodes) # 标准化边数据 - links = data.get("links", []) + links = data.get("links", data.get("edges", [])) standardized_links = canonicalize_links_ports(links, resource_tree_set) # 构建 NetworkX 图(需要转换回 dict 格式) @@ -597,6 +597,8 @@ def resource_plr_to_ulab(resource_plr: "ResourcePLR", parent_name: str = None, w "tube": "tube", "bottle_carrier": "bottle_carrier", "plate_adapter": "plate_adapter", + "electrode_sheet": "electrode_sheet", + "material_hole": "material_hole", } if source in replace_info: return replace_info[source] diff --git a/unilabos/resources/resource_tracker.py b/unilabos/resources/resource_tracker.py index 4097782..8a0fef3 100644 --- a/unilabos/resources/resource_tracker.py +++ b/unilabos/resources/resource_tracker.py @@ -13,6 +13,9 @@ if TYPE_CHECKING: from pylabrobot.resources import Resource as PLRResource +EXTRA_CLASS = "unilabos_resource_class" + + class ResourceDictPositionSize(BaseModel): depth: float = Field(description="Depth", default=0.0) # z width: float = Field(description="Width", default=0.0) # x @@ -393,7 +396,7 @@ class ResourceTreeSet(object): "parent": parent_resource, # 直接传入 ResourceDict 对象 "parent_uuid": parent_uuid, # 使用 parent_uuid 而不是 parent 对象 "type": replace_plr_type(d.get("category", "")), - "class": d.get("class", ""), + "class": extra.get(EXTRA_CLASS, ""), "position": pos, "pose": pos, "config": { @@ -443,7 +446,7 @@ class ResourceTreeSet(object): trees.append(tree_instance) return cls(trees) - def to_plr_resources(self) -> List["PLRResource"]: + def to_plr_resources(self, skip_devices=True) -> List["PLRResource"]: """ 将 ResourceTreeSet 转换为 PLR 资源列表 @@ -468,6 +471,7 @@ class ResourceTreeSet(object): name_to_uuid[node.res_content.name] = node.res_content.uuid all_states[node.res_content.name] = node.res_content.data name_to_extra[node.res_content.name] = node.res_content.extra + name_to_extra[node.res_content.name][EXTRA_CLASS] = node.res_content.klass for child in node.children: collect_node_data(child, name_to_uuid, all_states, name_to_extra) @@ -512,7 +516,10 @@ class ResourceTreeSet(object): plr_dict = node_to_plr_dict(tree.root_node, has_model) try: sub_cls = find_subclass(plr_dict["type"], PLRResource) - if sub_cls is None: + if skip_devices and plr_dict["type"] == "device": + logger.info(f"跳过更新 {plr_dict['name']} 设备是class") + continue + elif sub_cls is None: raise ValueError( f"无法找到类型 {plr_dict['type']} 对应的 PLR 资源类。原始信息:{tree.root_node.res_content}" ) @@ -520,6 +527,10 @@ class ResourceTreeSet(object): if "category" not in spec.parameters: plr_dict.pop("category", None) plr_resource = sub_cls.deserialize(plr_dict, allow_marshal=True) + from pylabrobot.resources import Coordinate + from pylabrobot.serializer import 
deserialize + location = cast(Coordinate, deserialize(plr_dict["location"])) + plr_resource.location = location plr_resource.load_all_state(all_states) # 使用 DeviceNodeResourceTracker 设置 UUID 和 Extra tracker.loop_set_uuid(plr_resource, name_to_uuid) @@ -986,7 +997,7 @@ class DeviceNodeResourceTracker(object): extra = name_to_extra_map[resource_name] self.set_resource_extra(res, extra) if len(extra): - logger.debug(f"设置资源Extra: {resource_name} -> {extra}") + logger.trace(f"设置资源Extra: {resource_name} -> {extra}") return 1 return 0 diff --git a/unilabos/ros/msgs/message_converter.py b/unilabos/ros/msgs/message_converter.py index 632d5e1..b526d5f 100644 --- a/unilabos/ros/msgs/message_converter.py +++ b/unilabos/ros/msgs/message_converter.py @@ -770,13 +770,16 @@ def ros_message_to_json_schema(msg_class: Any, field_name: str) -> Dict[str, Any return schema -def ros_action_to_json_schema(action_class: Any, description="") -> Dict[str, Any]: +def ros_action_to_json_schema( + action_class: Any, description="", previous_schema: Optional[Dict[str, Any]] = None +) -> Dict[str, Any]: """ 将 ROS Action 类转换为 JSON Schema Args: action_class: ROS Action 类 description: 描述 + previous_schema: 之前的 schema,用于保留 goal/feedback/result 下一级字段的 description Returns: 完整的 JSON Schema 定义 @@ -810,9 +813,44 @@ def ros_action_to_json_schema(action_class: Any, description="") -> Dict[str, An "required": ["goal"], } + # 保留之前 schema 中 goal/feedback/result 下一级字段的 description + if previous_schema: + _preserve_field_descriptions(schema, previous_schema) + return schema +def _preserve_field_descriptions( + new_schema: Dict[str, Any], previous_schema: Dict[str, Any] +) -> None: + """ + 保留之前 schema 中 goal/feedback/result 下一级字段的 description 和 title + + Args: + new_schema: 新生成的 schema(会被修改) + previous_schema: 之前的 schema + """ + for section in ["goal", "feedback", "result"]: + new_section = new_schema.get("properties", {}).get(section, {}) + prev_section = previous_schema.get("properties", {}).get(section, {}) + + if not new_section or not prev_section: + continue + + new_props = new_section.get("properties", {}) + prev_props = prev_section.get("properties", {}) + + for field_name, field_schema in new_props.items(): + if field_name in prev_props: + prev_field = prev_props[field_name] + # 保留字段的 description + if "description" in prev_field and prev_field["description"]: + field_schema["description"] = prev_field["description"] + # 保留字段的 title(用户自定义的中文名) + if "title" in prev_field and prev_field["title"]: + field_schema["title"] = prev_field["title"] + + def convert_ros_action_to_jsonschema( action_name_or_type: Union[str, Type], output_file: Optional[str] = None, format: str = "json" ) -> Dict[str, Any]: diff --git a/unilabos/ros/nodes/base_device_node.py b/unilabos/ros/nodes/base_device_node.py index 737167a..3d1ffda 100644 --- a/unilabos/ros/nodes/base_device_node.py +++ b/unilabos/ros/nodes/base_device_node.py @@ -49,7 +49,6 @@ from unilabos.resources.resource_tracker import ( ResourceTreeInstance, ResourceDictInstance, ) -from unilabos.ros.x.rclpyx import get_event_loop from unilabos.ros.utils.driver_creator import WorkstationNodeCreator, PyLabRobotCreator, DeviceClassCreator from rclpy.task import Task, Future from unilabos.utils.import_manager import default_manager @@ -185,7 +184,7 @@ class PropertyPublisher: f"创建发布者 {name} 失败,可能由于注册表有误,类型: {msg_type},错误: {ex}\n{traceback.format_exc()}" ) self.timer = node.create_timer(self.timer_period, self.publish_property) - self.__loop = get_event_loop() + self.__loop = 
ROS2DeviceNode.get_asyncio_loop() str_msg_type = str(msg_type)[8:-2] self.node.lab_logger().trace(f"发布属性: {name}, 类型: {str_msg_type}, 周期: {initial_period}秒, QoS: {qos}") @@ -885,6 +884,9 @@ class BaseROS2DeviceNode(Node, Generic[T]): parent_appended = True # 加载状态 + original_instance.location = plr_resource.location + original_instance.rotation = plr_resource.rotation + original_instance.barcode = plr_resource.barcode original_instance.load_all_state(states) child_count = len(original_instance.get_all_children()) self.lab_logger().info( @@ -1320,19 +1322,32 @@ class BaseROS2DeviceNode(Node, Generic[T]): resource_inputs = action_kwargs[k] if is_sequence else [action_kwargs[k]] # 批量查询资源 - queried_resources = [] - for resource_data in resource_inputs: + queried_resources: list = [None] * len(resource_inputs) + uuid_indices: list[tuple[int, str, dict]] = [] # (index, uuid, resource_data) + + # 第一遍:处理没有uuid的资源,收集有uuid的资源信息 + for idx, resource_data in enumerate(resource_inputs): unilabos_uuid = resource_data.get("data", {}).get("unilabos_uuid") if unilabos_uuid is None: plr_resource = await self.get_resource_with_dir( resource_id=resource_data["id"], with_children=True ) + if "sample_id" in resource_data: + plr_resource.unilabos_extra["sample_uuid"] = resource_data["sample_id"] + queried_resources[idx] = plr_resource else: - resource_tree = await self.get_resource([unilabos_uuid]) - plr_resource = resource_tree.to_plr_resources()[0] - if "sample_id" in resource_data: - plr_resource.unilabos_extra["sample_uuid"] = resource_data["sample_id"] - queried_resources.append(plr_resource) + uuid_indices.append((idx, unilabos_uuid, resource_data)) + + # 第二遍:批量查询有uuid的资源 + if uuid_indices: + uuids = [item[1] for item in uuid_indices] + resource_tree = await self.get_resource(uuids) + plr_resources = resource_tree.to_plr_resources() + for i, (idx, _, resource_data) in enumerate(uuid_indices): + plr_resource = plr_resources[i] + if "sample_id" in resource_data: + plr_resource.unilabos_extra["sample_uuid"] = resource_data["sample_id"] + queried_resources[idx] = plr_resource self.lab_logger().debug(f"资源查询结果: 共 {len(queried_resources)} 个资源") @@ -1757,6 +1772,15 @@ class ROS2DeviceNode: 它不继承设备类,而是通过代理模式访问设备类的属性和方法。 """ + # 类变量,用于循环管理 + _asyncio_loop = None + _asyncio_loop_running = False + _asyncio_loop_thread = None + + @classmethod + def get_asyncio_loop(cls): + return cls._asyncio_loop + @staticmethod async def safe_task_wrapper(trace_callback, func, **kwargs): try: @@ -1833,6 +1857,11 @@ class ROS2DeviceNode: print_publish: 是否打印发布信息 driver_is_ros: """ + # 在初始化时检查循环状态 + if ROS2DeviceNode._asyncio_loop_running and ROS2DeviceNode._asyncio_loop_thread is not None: + pass + elif ROS2DeviceNode._asyncio_loop_thread is None: + self._start_loop() # 保存设备类是否支持异步上下文 self._has_async_context = hasattr(driver_class, "__aenter__") and hasattr(driver_class, "__aexit__") @@ -1924,6 +1953,17 @@ class ROS2DeviceNode: except Exception as e: self._ros_node.lab_logger().error(f"设备后初始化失败: {e}") + def _start_loop(self): + def run_event_loop(): + loop = asyncio.new_event_loop() + ROS2DeviceNode._asyncio_loop = loop + asyncio.set_event_loop(loop) + loop.run_forever() + + ROS2DeviceNode._asyncio_loop_thread = threading.Thread(target=run_event_loop, daemon=True, name="ROS2DeviceNode") + ROS2DeviceNode._asyncio_loop_thread.start() + logger.info(f"循环线程已启动") + class DeviceInfoType(TypedDict): id: str diff --git a/unilabos/ros/nodes/presets/host_node.py b/unilabos/ros/nodes/presets/host_node.py index 101476a..e95b393 100644 --- 
a/unilabos/ros/nodes/presets/host_node.py +++ b/unilabos/ros/nodes/presets/host_node.py @@ -5,7 +5,8 @@ import threading import time import traceback import uuid -from typing import TYPE_CHECKING, Optional, Dict, Any, List, ClassVar, Set, TypedDict, Union +from typing import TYPE_CHECKING, Optional, Dict, Any, List, ClassVar, Set, Union +from typing_extensions import TypedDict from action_msgs.msg import GoalStatus from geometry_msgs.msg import Point @@ -62,6 +63,18 @@ class TestResourceReturn(TypedDict): devices: List[DeviceSlot] +class TestLatencyReturn(TypedDict): + """test_latency方法的返回值类型""" + + avg_rtt_ms: float + avg_time_diff_ms: float + max_time_error_ms: float + task_delay_ms: float + raw_delay_ms: float + test_count: int + status: str + + class HostNode(BaseROS2DeviceNode): """ 主机节点类,负责管理设备、资源和控制器 @@ -795,6 +808,7 @@ class HostNode(BaseROS2DeviceNode): goal_msg = convert_to_ros_msg(action_client._action_type.Goal(), action_kwargs) self.lab_logger().info(f"[Host Node] Sending goal for {action_id}: {str(goal_msg)[:1000]}") + self.lab_logger().trace(f"[Host Node] Sending goal for {action_id}: {action_kwargs}") self.lab_logger().trace(f"[Host Node] Sending goal for {action_id}: {goal_msg}") action_client.wait_for_server() goal_uuid_obj = UUID(uuid=list(u.bytes)) @@ -853,8 +867,13 @@ class HostNode(BaseROS2DeviceNode): # 适配后端的一些额外处理 return_value = return_info.get("return_value") if isinstance(return_value, dict): - unilabos_samples = return_info.get("unilabos_samples") - if isinstance(unilabos_samples, list): + unilabos_samples = return_value.pop("unilabos_samples", None) + if isinstance(unilabos_samples, list) and unilabos_samples: + self.lab_logger().info( + f"[Host Node] Job {job_id[:8]} returned {len(unilabos_samples)} sample(s): " + f"{[s.get('name', s.get('id', 'unknown')) if isinstance(s, dict) else str(s)[:20] for s in unilabos_samples[:5]]}" + f"{'...' 
if len(unilabos_samples) > 5 else ''}" + ) return_info["unilabos_samples"] = unilabos_samples suc = return_info.get("suc", False) if not suc: @@ -881,7 +900,7 @@ class HostNode(BaseROS2DeviceNode): # 清理 _goals 中的记录 if job_id in self._goals: del self._goals[job_id] - self.lab_logger().debug(f"[Host Node] Removed goal {job_id[:8]} from _goals") + self.lab_logger().trace(f"[Host Node] Removed goal {job_id[:8]} from _goals") # 存储结果供 HTTP API 查询 try: @@ -1326,10 +1345,20 @@ class HostNode(BaseROS2DeviceNode): self.lab_logger().debug(f"[Host Node-Resource] List parameters: {request}") return response - def test_latency(self): + def test_latency(self) -> TestLatencyReturn: """ 测试网络延迟的action实现 通过5次ping-pong机制校对时间误差并计算实际延迟 + + Returns: + TestLatencyReturn: 包含延迟测试结果的字典,包括: + - avg_rtt_ms: 平均往返时间(毫秒) + - avg_time_diff_ms: 平均时间差(毫秒) + - max_time_error_ms: 最大时间误差(毫秒) + - task_delay_ms: 实际任务延迟(毫秒),-1表示无法计算 + - raw_delay_ms: 原始时间差(毫秒),-1表示无法计算 + - test_count: 有效测试次数 + - status: 测试状态,"success"表示成功,"all_timeout"表示全部超时 """ import uuid as uuid_module @@ -1392,7 +1421,15 @@ class HostNode(BaseROS2DeviceNode): if not ping_results: self.lab_logger().error("❌ 所有ping-pong测试都失败了") - return {"status": "all_timeout"} + return { + "avg_rtt_ms": -1.0, + "avg_time_diff_ms": -1.0, + "max_time_error_ms": -1.0, + "task_delay_ms": -1.0, + "raw_delay_ms": -1.0, + "test_count": 0, + "status": "all_timeout", + } # 统计分析 rtts = [r["rtt_ms"] for r in ping_results] @@ -1400,7 +1437,7 @@ class HostNode(BaseROS2DeviceNode): avg_rtt_ms = sum(rtts) / len(rtts) avg_time_diff_ms = sum(time_diffs) / len(time_diffs) - max_time_diff_error_ms = max(abs(min(time_diffs)), abs(max(time_diffs))) + max_time_diff_error_ms: float = max(abs(min(time_diffs)), abs(max(time_diffs))) self.lab_logger().info("-" * 50) self.lab_logger().info("[测试统计]") @@ -1440,7 +1477,7 @@ class HostNode(BaseROS2DeviceNode): self.lab_logger().info("=" * 60) - return { + res: TestLatencyReturn = { "avg_rtt_ms": avg_rtt_ms, "avg_time_diff_ms": avg_time_diff_ms, "max_time_error_ms": max_time_diff_error_ms, @@ -1451,9 +1488,14 @@ class HostNode(BaseROS2DeviceNode): "test_count": len(ping_results), "status": "success", } + return res def test_resource( - self, resource: ResourceSlot = None, resources: List[ResourceSlot] = None, device: DeviceSlot = None, devices: List[DeviceSlot] = None + self, + resource: ResourceSlot = None, + resources: List[ResourceSlot] = None, + device: DeviceSlot = None, + devices: List[DeviceSlot] = None, ) -> TestResourceReturn: if resources is None: resources = [] @@ -1514,7 +1556,9 @@ class HostNode(BaseROS2DeviceNode): # 构建服务地址 srv_address = f"/srv{namespace}/s2c_resource_tree" - self.lab_logger().trace(f"[Host Node-Resource] Host -> {device_id} ResourceTree {action} operation started -------") + self.lab_logger().trace( + f"[Host Node-Resource] Host -> {device_id} ResourceTree {action} operation started -------" + ) # 创建服务客户端 sclient = self.create_client(SerialCommand, srv_address) @@ -1549,7 +1593,9 @@ class HostNode(BaseROS2DeviceNode): time.sleep(0.05) response = future.result() - self.lab_logger().trace(f"[Host Node-Resource] Host -> {device_id} ResourceTree {action} operation completed -------") + self.lab_logger().trace( + f"[Host Node-Resource] Host -> {device_id} ResourceTree {action} operation completed -------" + ) return True except Exception as e: diff --git a/unilabos/ros/nodes/presets/workstation.py b/unilabos/ros/nodes/presets/workstation.py index ed3fe14..f30e33b 100644 --- a/unilabos/ros/nodes/presets/workstation.py +++ 
b/unilabos/ros/nodes/presets/workstation.py @@ -6,8 +6,6 @@ from typing import List, Dict, Any, Optional, TYPE_CHECKING import rclpy from rosidl_runtime_py import message_to_ordereddict -from unilabos_msgs.msg import Resource -from unilabos_msgs.srv import ResourceUpdate from unilabos.messages import * # type: ignore # protocol names from rclpy.action import ActionServer, ActionClient @@ -15,7 +13,6 @@ from rclpy.action.server import ServerGoalHandle from unilabos_msgs.srv._serial_command import SerialCommand_Request, SerialCommand_Response from unilabos.compile import action_protocol_generators -from unilabos.resources.graphio import nested_dict_to_list from unilabos.ros.initialize_device import initialize_device_from_dict from unilabos.ros.msgs.message_converter import ( get_action_type, @@ -231,15 +228,15 @@ class ROS2WorkstationNode(BaseROS2DeviceNode): try: # 统一处理单个或多个资源 resource_id = ( - protocol_kwargs[k]["id"] if v == "unilabos_msgs/Resource" else protocol_kwargs[k][0]["id"] + protocol_kwargs[k]["id"] + if v == "unilabos_msgs/Resource" + else protocol_kwargs[k][0]["id"] ) resource_uuid = protocol_kwargs[k].get("uuid", None) r = SerialCommand_Request() r.command = json.dumps({"id": resource_id, "uuid": resource_uuid, "with_children": True}) # 发送请求并等待响应 - response: SerialCommand_Response = await self._resource_clients[ - "resource_get" - ].call_async( + response: SerialCommand_Response = await self._resource_clients["resource_get"].call_async( r ) # type: ignore raw_data = json.loads(response.response) @@ -307,12 +304,52 @@ class ROS2WorkstationNode(BaseROS2DeviceNode): # 向Host更新物料当前状态 for k, v in goal.get_fields_and_field_types().items(): - if v in ["unilabos_msgs/Resource", "sequence"]: - r = ResourceUpdate.Request() - r.resources = [ - convert_to_ros_msg(Resource, rs) for rs in nested_dict_to_list(protocol_kwargs[k]) - ] - response = await self._resource_clients["resource_update"].call_async(r) + if v not in ["unilabos_msgs/Resource", "sequence"]: + continue + self.lab_logger().info(f"更新资源状态: {k}") + try: + # 去重:使用 seen 集合获取唯一的资源对象 + seen = set() + unique_resources = [] + + # 获取资源数据,统一转换为列表 + resource_data = protocol_kwargs[k] + is_sequence = v != "unilabos_msgs/Resource" + if not is_sequence: + resource_list = [resource_data] if isinstance(resource_data, dict) else resource_data + else: + # 处理序列类型,可能是嵌套列表 + resource_list = [] + if isinstance(resource_data, list): + for item in resource_data: + if isinstance(item, list): + resource_list.extend(item) + else: + resource_list.append(item) + else: + resource_list = [resource_data] + + for res_data in resource_list: + if not isinstance(res_data, dict): + continue + res_name = res_data.get("id") or res_data.get("name") + if not res_name: + continue + + # 使用 resource_tracker 获取本地 PLR 实例 + plr = self.resource_tracker.figure_resource({"name": res_name}, try_mode=False) + # 获取父资源 + res = self.resource_tracker.parent_resource(plr) + if id(res) not in seen: + seen.add(id(res)) + unique_resources.append(res) + + # 使用新的资源树接口更新 + if unique_resources: + await self.update_resource(unique_resources) + except Exception as e: + self.lab_logger().error(f"资源更新失败: {e}") + self.lab_logger().error(traceback.format_exc()) # 设置成功状态和返回值 execution_success = True diff --git a/unilabos/ros/x/rclpyx.py b/unilabos/ros/x/rclpyx.py deleted file mode 100644 index a723922..0000000 --- a/unilabos/ros/x/rclpyx.py +++ /dev/null @@ -1,182 +0,0 @@ -import asyncio -from asyncio import events -import threading - -import rclpy -from rclpy.impl.implementation_singleton import 
rclpy_implementation as _rclpy -from rclpy.executors import await_or_execute, Executor -from rclpy.action import ActionClient, ActionServer -from rclpy.action.server import ServerGoalHandle, GoalResponse, GoalInfo, GoalStatus -from std_msgs.msg import String -from action_tutorials_interfaces.action import Fibonacci - - -loop = None - -def get_event_loop(): - global loop - return loop - - -async def default_handle_accepted_callback_async(goal_handle): - """Execute the goal.""" - await goal_handle.execute() - - -class ServerGoalHandleX(ServerGoalHandle): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - async def execute(self, execute_callback=None): - # It's possible that there has been a request to cancel the goal prior to executing. - # In this case we want to avoid the illegal state transition to EXECUTING - # but still call the users execute callback to let them handle canceling the goal. - if not self.is_cancel_requested: - self._update_state(_rclpy.GoalEvent.EXECUTE) - await self._action_server.notify_execute_async(self, execute_callback) - - -class ActionServerX(ActionServer): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.register_handle_accepted_callback(default_handle_accepted_callback_async) - - async def _execute_goal_request(self, request_header_and_message): - request_header, goal_request = request_header_and_message - goal_uuid = goal_request.goal_id - goal_info = GoalInfo() - goal_info.goal_id = goal_uuid - - self._node.get_logger().debug('New goal request with ID: {0}'.format(goal_uuid.uuid)) - - # Check if goal ID is already being tracked by this action server - with self._lock: - goal_id_exists = self._handle.goal_exists(goal_info) - - accepted = False - if not goal_id_exists: - # Call user goal callback - response = await await_or_execute(self._goal_callback, goal_request.goal) - if not isinstance(response, GoalResponse): - self._node.get_logger().warning( - 'Goal request callback did not return a GoalResponse type. 
Rejecting goal.') - else: - accepted = GoalResponse.ACCEPT == response - - if accepted: - # Stamp time of acceptance - goal_info.stamp = self._node.get_clock().now().to_msg() - - # Create a goal handle - try: - with self._lock: - goal_handle = ServerGoalHandleX(self, goal_info, goal_request.goal) - except RuntimeError as e: - self._node.get_logger().error( - 'Failed to accept new goal with ID {0}: {1}'.format(goal_uuid.uuid, e)) - accepted = False - else: - self._goal_handles[bytes(goal_uuid.uuid)] = goal_handle - - # Send response - response_msg = self._action_type.Impl.SendGoalService.Response() - response_msg.accepted = accepted - response_msg.stamp = goal_info.stamp - self._handle.send_goal_response(request_header, response_msg) - - if not accepted: - self._node.get_logger().debug('New goal rejected: {0}'.format(goal_uuid.uuid)) - return - - self._node.get_logger().debug('New goal accepted: {0}'.format(goal_uuid.uuid)) - - # Provide the user a reference to the goal handle - # await await_or_execute(self._handle_accepted_callback, goal_handle) - asyncio.create_task(self._handle_accepted_callback(goal_handle)) - - async def notify_execute_async(self, goal_handle, execute_callback): - # Use provided callback, defaulting to a previously registered callback - if execute_callback is None: - if self._execute_callback is None: - return - execute_callback = self._execute_callback - - # Schedule user callback for execution - self._node.get_logger().info(f"{events.get_running_loop()}") - asyncio.create_task(self._execute_goal(execute_callback, goal_handle)) - # loop = asyncio.new_event_loop() - # asyncio.set_event_loop(loop) - # task = loop.create_task(self._execute_goal(execute_callback, goal_handle)) - # await task - - -class ActionClientX(ActionClient): - feedback_queue = asyncio.Queue() - - async def feedback_cb(self, msg): - await self.feedback_queue.put(msg) - - async def send_goal_async(self, goal_msg): - goal_future = super().send_goal_async( - goal_msg, - feedback_callback=self.feedback_cb - ) - client_goal_handle = await asyncio.ensure_future(goal_future) - if not client_goal_handle.accepted: - raise Exception("Goal rejected.") - result_future = client_goal_handle.get_result_async() - while True: - feedback_future = asyncio.ensure_future(self.feedback_queue.get()) - tasks = [result_future, feedback_future] - await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED) - if result_future.done(): - result = result_future.result().result - yield (None, result) - break - else: - feedback = feedback_future.result().feedback - yield (feedback, None) - - -async def main(node): - print('Node started.') - action_client = ActionClientX(node, Fibonacci, 'fibonacci') - goal_msg = Fibonacci.Goal() - goal_msg.order = 10 - async for (feedback, result) in action_client.send_goal_async(goal_msg): - if feedback: - print(f'Feedback: {feedback}') - else: - print(f'Result: {result}') - print('Finished.') - - -async def ros_loop_node(node): - while rclpy.ok(): - rclpy.spin_once(node, timeout_sec=0) - await asyncio.sleep(1e-4) - - -async def ros_loop(executor: Executor): - while rclpy.ok(): - executor.spin_once(timeout_sec=0) - await asyncio.sleep(1e-4) - - -def run_event_loop(): - global loop - loop = asyncio.new_event_loop() - asyncio.set_event_loop(loop) - loop.run_forever() - - -def run_event_loop_in_thread(): - thread = threading.Thread(target=run_event_loop, args=()) - thread.start() - - -if __name__ == "__main__": - rclpy.init() - node = rclpy.create_node('async_subscriber') - future = 
asyncio.wait([ros_loop(node), main()]) - asyncio.get_event_loop().run_until_complete(future) \ No newline at end of file diff --git a/unilabos/test/experiments/virtual_bench.json b/unilabos/test/experiments/virtual_bench.json new file mode 100644 index 0000000..d37fa6e --- /dev/null +++ b/unilabos/test/experiments/virtual_bench.json @@ -0,0 +1,28 @@ +{ + "nodes": [ + { + "id": "workbench_1", + "name": "虚拟工作台", + "children": [], + "parent": null, + "type": "device", + "class": "virtual_workbench", + "position": { + "x": 400, + "y": 300, + "z": 0 + }, + "config": { + "arm_operation_time": 3.0, + "heating_time": 10.0, + "num_heating_stations": 3 + }, + "data": { + "status": "Ready", + "arm_state": "idle", + "message": "工作台就绪" + } + } + ], + "links": [] +} diff --git a/unilabos/utils/README_LOGGING.md b/unilabos/utils/README_LOGGING.md deleted file mode 100644 index 9cb551b..0000000 --- a/unilabos/utils/README_LOGGING.md +++ /dev/null @@ -1,187 +0,0 @@ -# UniLabOS 日志配置说明 - -> **文件位置**: `unilabos/utils/log.py` -> **最后更新**: 2026-01-11 -> **维护者**: Uni-Lab-OS 开发团队 - -本文档说明 UniLabOS 日志系统中对第三方库和内部模块的日志级别配置,避免控制台被过多的 DEBUG 日志淹没。 - ---- - -## 📋 已屏蔽的日志 - -以下库/模块的日志已被设置为 **WARNING** 或 **INFO** 级别,不再显示 DEBUG 日志: - -### 1. pymodbus(Modbus 通信库) - -**配置位置**: `log.py` 第196-200行 - -```python -# pymodbus 库的日志太详细,设置为 WARNING -logging.getLogger('pymodbus').setLevel(logging.WARNING) -logging.getLogger('pymodbus.logging').setLevel(logging.WARNING) -logging.getLogger('pymodbus.logging.base').setLevel(logging.WARNING) -logging.getLogger('pymodbus.logging.decoders').setLevel(logging.WARNING) -``` - -**屏蔽原因**: -- pymodbus 在 DEBUG 级别会输出每一次 Modbus 通信的详细信息 -- 包括 `Processing: 0x5 0x1e 0x0 0x0...` 等原始数据 -- 包括 `decoded PDU function_code(3 sub -1) -> ReadHoldingRegistersResponse(...)` 等解码信息 -- 这些信息对日常使用价值不大,但会快速刷屏 - -**典型被屏蔽的日志**: -``` -[DEBUG] Processing: 0x5 0x1e 0x0 0x0 0x0 0x7 0x1 0x3 0x4 0x0 0x0 0x0 0x0 [handleFrame:72] [pymodbus.logging.base] -[DEBUG] decoded PDU function_code(3 sub -1) -> ReadHoldingRegistersResponse(...) [decode:79] [pymodbus.logging.decoders] -``` - ---- - -### 2. websockets(WebSocket 库) - -**配置位置**: `log.py` 第202-205行 - -```python -# websockets 库的日志输出较多,设置为 WARNING -logging.getLogger('websockets').setLevel(logging.WARNING) -logging.getLogger('websockets.client').setLevel(logging.WARNING) -logging.getLogger('websockets.server').setLevel(logging.WARNING) -``` - -**屏蔽原因**: -- WebSocket 连接、断开、心跳等信息在 DEBUG 级别会频繁输出 -- 对于长时间运行的服务,这些日志意义不大 - ---- - -### 3. ROS Host Node(设备状态更新) - -**配置位置**: `log.py` 第207-208行 - -```python -# ROS 节点的状态更新日志过于频繁,设置为 INFO -logging.getLogger('unilabos.ros.nodes.presets.host_node').setLevel(logging.INFO) -``` - -**屏蔽原因**: -- 设备状态更新(如手套箱压力)每隔几秒就会更新一次 -- DEBUG 日志会记录每一次状态变化,导致日志刷屏 -- 这些频繁的状态更新对调试价值不大 - -**典型被屏蔽的日志**: -``` -[DEBUG] [/devices/host_node] Status updated: BatteryStation.data_glove_box_pressure = 4.229457855224609 [property_callback:666] [unilabos.ros.nodes.presets.host_node] -``` - ---- - -### 4. 
asyncio 和 urllib3 - -**配置位置**: `log.py` 第224-225行 - -```python -logging.getLogger("asyncio").setLevel(logging.INFO) -logging.getLogger("urllib3").setLevel(logging.INFO) -``` - -**屏蔽原因**: -- asyncio: 异步 IO 的内部调试信息 -- urllib3: HTTP 请求库的连接池、重试等详细信息 - ---- - -## 🔧 如何临时启用这些日志(调试用) - -### 方法1: 修改 log.py(永久启用) - -在 `log.py` 的 `configure_logger()` 函数中,将对应库的日志级别改为 `logging.DEBUG`: - -```python -# 临时启用 pymodbus 的 DEBUG 日志 -logging.getLogger('pymodbus').setLevel(logging.DEBUG) -logging.getLogger('pymodbus.logging').setLevel(logging.DEBUG) -logging.getLogger('pymodbus.logging.base').setLevel(logging.DEBUG) -logging.getLogger('pymodbus.logging.decoders').setLevel(logging.DEBUG) -``` - -### 方法2: 在代码中临时启用(单次调试) - -在需要调试的代码文件中添加: - -```python -import logging - -# 临时启用 pymodbus DEBUG 日志 -logging.getLogger('pymodbus').setLevel(logging.DEBUG) - -# 你的 Modbus 调试代码 -... - -# 调试完成后恢复 -logging.getLogger('pymodbus').setLevel(logging.WARNING) -``` - -### 方法3: 使用环境变量或配置文件(推荐) - -未来可以考虑在启动参数中添加 `--debug-modbus` 等选项来动态控制。 - ---- - -## 📊 日志级别说明 - -| 级别 | 数值 | 用途 | 是否显示 | -|------|------|------|---------| -| TRACE | 5 | 最详细的跟踪信息 | ✅ | -| DEBUG | 10 | 调试信息 | ✅ | -| INFO | 20 | 一般信息 | ✅ | -| WARNING | 30 | 警告信息 | ✅ | -| ERROR | 40 | 错误信息 | ✅ | -| CRITICAL | 50 | 严重错误 | ✅ | - -**当前配置**: -- UniLabOS 自身代码: DEBUG 及以上全部显示 -- pymodbus/websockets: **WARNING** 及以上显示(屏蔽 DEBUG/INFO) -- ROS host_node: **INFO** 及以上显示(屏蔽 DEBUG) - ---- - -## ⚠️ 重要提示 - -### 修改生效时间 -- 修改 `log.py` 后需要 **重启 unilab 服务** 才能生效 -- 不需要重新安装或重新编译 - -### 调试 Modbus 通信问题 -如果需要调试 Modbus 通信故障,应该: -1. 临时启用 pymodbus DEBUG 日志(方法2) -2. 复现问题 -3. 查看详细的通信日志 -4. 调试完成后记得恢复 WARNING 级别 - -### 调试设备状态问题 -如果需要调试设备状态更新问题: -```python -logging.getLogger('unilabos.ros.nodes.presets.host_node').setLevel(logging.DEBUG) -``` - ---- - -## 📝 维护记录 - -| 日期 | 修改内容 | 操作人 | -|------|---------|--------| -| 2026-01-11 | 初始创建,添加 pymodbus、websockets、ROS host_node 屏蔽 | - | -| 2026-01-07 | 添加 pymodbus 和 websockets 屏蔽(log-0107.py) | - | - ---- - -## 🔗 相关文件 - -- `log.py` - 日志配置主文件 -- `unilabos/devices/workstation/coin_cell_assembly/` - 使用 Modbus 的扣电工作站代码 -- `unilabos/ros/nodes/presets/host_node.py` - ROS 主机节点代码 - ---- - -**维护提示**: 如果添加了新的第三方库或发现新的日志刷屏问题,请在此文档中记录并更新 `log.py` 配置。 diff --git a/unilabos/utils/decorator.py b/unilabos/utils/decorator.py index 667f353..57e968a 100644 --- a/unilabos/utils/decorator.py +++ b/unilabos/utils/decorator.py @@ -182,3 +182,49 @@ def get_all_subscriptions(instance) -> list: except Exception: pass return subscriptions + + +def not_action(func: F) -> F: + """ + 标记方法为非动作的装饰器 + + 用于装饰 driver 类中的方法,使其在 complete_registry 时不被识别为动作。 + 适用于辅助方法、内部工具方法等不应暴露为设备动作的公共方法。 + + Example: + class MyDriver: + @not_action + def helper_method(self): + # 这个方法不会被注册为动作 + pass + + def actual_action(self, param: str): + # 这个方法会被注册为动作 + self.helper_method() + + Note: + - 可以与其他装饰器组合使用,@not_action 应放在最外层 + - 仅影响 complete_registry 的动作识别,不影响方法的正常调用 + """ + + @wraps(func) + def wrapper(*args, **kwargs): + return func(*args, **kwargs) + + # 在函数上附加标记 + wrapper._is_not_action = True # type: ignore[attr-defined] + + return wrapper # type: ignore[return-value] + + +def is_not_action(func) -> bool: + """ + 检查函数是否被标记为非动作 + + Args: + func: 被检查的函数 + + Returns: + 如果函数被 @not_action 装饰则返回 True,否则返回 False + """ + return getattr(func, "_is_not_action", False) diff --git a/unilabos/utils/environment_check.py b/unilabos/utils/environment_check.py index 3963b9e..73c0b10 100644 --- a/unilabos/utils/environment_check.py +++ b/unilabos/utils/environment_check.py @@ -24,6 +24,7 @@ class EnvironmentChecker: "msgcenterpy": 
"msgcenterpy", "opentrons_shared_data": "opentrons_shared_data", "typing_extensions": "typing_extensions", + "crcmod": "crcmod-plus", } # 特殊安装包(需要特殊处理的包) diff --git a/unilabos/utils/import_manager.py b/unilabos/utils/import_manager.py index 00fcd06..2df7636 100644 --- a/unilabos/utils/import_manager.py +++ b/unilabos/utils/import_manager.py @@ -28,6 +28,7 @@ __all__ = [ from ast import Constant from unilabos.utils import logger +from unilabos.utils.decorator import is_not_action class ImportManager: @@ -275,6 +276,9 @@ class ImportManager: method_info = self._analyze_method_signature(method) result["status_methods"][actual_name] = method_info elif not name.startswith("_"): + # 检查是否被 @not_action 装饰器标记 + if is_not_action(method): + continue # 其他非_开头的方法归类为action method_info = self._analyze_method_signature(method) result["action_methods"][name] = method_info @@ -330,6 +334,9 @@ class ImportManager: if actual_name not in result["status_methods"]: result["status_methods"][actual_name] = method_info else: + # 检查是否被 @not_action 装饰器标记 + if self._is_not_action_method(node): + continue # 其他非_开头的方法归类为action result["action_methods"][method_name] = method_info return result @@ -450,6 +457,13 @@ class ImportManager: return True return False + def _is_not_action_method(self, node: ast.FunctionDef) -> bool: + """检查是否是@not_action装饰的方法""" + for decorator in node.decorator_list: + if isinstance(decorator, ast.Name) and decorator.id == "not_action": + return True + return False + def _get_property_name_from_setter(self, node: ast.FunctionDef) -> str: """从setter装饰器中获取属性名""" for decorator in node.decorator_list: diff --git a/unilabos/utils/pywinauto_util.py b/unilabos/utils/pywinauto_util.py index 3b78632..70eeb96 100644 --- a/unilabos/utils/pywinauto_util.py +++ b/unilabos/utils/pywinauto_util.py @@ -1,7 +1,11 @@ import psutil import pywinauto -from pywinauto_recorder import UIApplication -from pywinauto_recorder.player import UIPath, click, focus_on_application, exists, find, get_wrapper_path +try: + from pywinauto_recorder import UIApplication + from pywinauto_recorder.player import UIPath, click, focus_on_application, exists, find, get_wrapper_path +except ImportError: + print("未安装pywinauto_recorder,部分功能无法使用,安装时注意enum") + pass from pywinauto.controls.uiawrapper import UIAWrapper from pywinauto.application import WindowSpecification from pywinauto import findbestmatch diff --git a/unilabos/utils/requirements.txt b/unilabos/utils/requirements.txt new file mode 100644 index 0000000..65d724f --- /dev/null +++ b/unilabos/utils/requirements.txt @@ -0,0 +1,18 @@ +networkx +typing_extensions +websockets +msgcenterpy>=0.1.5 +opentrons_shared_data +pint +fastapi +jinja2 +requests +uvicorn +pyautogui +opcua +pyserial +pandas +crcmod-plus +pymodbus +matplotlib +pylibftdi \ No newline at end of file diff --git a/unilabos/workflow/common.py b/unilabos/workflow/common.py index 9bff049..ad073d9 100644 --- a/unilabos/workflow/common.py +++ b/unilabos/workflow/common.py @@ -1,3 +1,89 @@ +""" +工作流转换模块 - JSON 到 WorkflowGraph 的转换流程 + +==================== 输入格式 (JSON) ==================== + +{ + "workflow": [ + {"action": "transfer_liquid", "action_args": {"sources": "cell_lines", "targets": "Liquid_1", "asp_vol": 100.0, "dis_vol": 74.75, ...}}, + ... + ], + "reagent": { + "cell_lines": {"slot": 4, "well": ["A1", "A3", "A5"], "labware": "DRUG + YOYO-MEDIA"}, + "Liquid_1": {"slot": 1, "well": ["A4", "A7", "A10"], "labware": "rep 1"}, + ... 
+ } +} + +==================== 转换步骤 ==================== + +第一步: 按 slot 去重创建 create_resource 节点(创建板子) +-------------------------------------------------------------------------------- +- 遍历所有 reagent,按 slot 去重,为每个唯一的 slot 创建一个板子 +- 生成参数: + res_id: plate_slot_{slot} + device_id: /PRCXI + class_name: PRCXI_BioER_96_wellplate + parent: /PRCXI/PRCXI_Deck/T{slot} + slot_on_deck: "{slot}" +- 输出端口: labware(用于连接 set_liquid_from_plate) +- 控制流: create_resource 之间通过 ready 端口串联 + +示例: slot=1, slot=4 -> 创建 2 个 create_resource 节点 + +第二步: 为每个 reagent 创建 set_liquid_from_plate 节点(设置液体) +-------------------------------------------------------------------------------- +- 遍历所有 reagent,为每个试剂创建 set_liquid_from_plate 节点 +- 生成参数: + plate: [](通过连接传递,来自 create_resource 的 labware) + well_names: ["A1", "A3", "A5"](来自 reagent 的 well 数组) + liquid_names: ["cell_lines", "cell_lines", "cell_lines"](与 well 数量一致) + volumes: [1e5, 1e5, 1e5](与 well 数量一致,默认体积) +- 输入连接: create_resource (labware) -> set_liquid_from_plate (input_plate) +- 输出端口: output_wells(用于连接 transfer_liquid) +- 控制流: set_liquid_from_plate 连接在所有 create_resource 之后,通过 ready 端口串联 + +第三步: 解析 workflow,创建 transfer_liquid 等动作节点 +-------------------------------------------------------------------------------- +- 遍历 workflow 数组,为每个动作创建步骤节点 +- 参数重命名: asp_vol -> asp_vols, dis_vol -> dis_vols, asp_flow_rate -> asp_flow_rates, dis_flow_rate -> dis_flow_rates +- 参数扩展: 根据 targets 的 wells 数量,将单值扩展为数组 + 例: asp_vol=100.0, targets 有 3 个 wells -> asp_vols=[100.0, 100.0, 100.0] +- 连接处理: 如果 sources/targets 已通过 set_liquid_from_plate 连接,参数值改为 [] +- 输入连接: set_liquid_from_plate (output_wells) -> transfer_liquid (sources_identifier / targets_identifier) +- 输出端口: sources_out, targets_out(用于连接下一个 transfer_liquid) + +==================== 连接关系图 ==================== + +控制流 (ready 端口串联): + create_resource_1 -> create_resource_2 -> ... -> set_liquid_1 -> set_liquid_2 -> ... -> transfer_liquid_1 -> transfer_liquid_2 -> ... + +物料流: + [create_resource] --labware--> [set_liquid_from_plate] --output_wells--> [transfer_liquid] --sources_out/targets_out--> [下一个 transfer_liquid] + (slot=1) (cell_lines) (input_plate) (sources_identifier) (sources_identifier) + (slot=4) (Liquid_1) (targets_identifier) (targets_identifier) + +==================== 端口映射 ==================== + +create_resource: + 输出: labware + +set_liquid_from_plate: + 输入: input_plate + 输出: output_plate, output_wells + +transfer_liquid: + 输入: sources -> sources_identifier, targets -> targets_identifier + 输出: sources -> sources_out, targets -> targets_out + +==================== 校验规则 ==================== + +- 检查 sources/targets 是否在 reagent 中定义 +- 检查 sources 和 targets 的 wells 数量是否匹配 +- 检查参数数组长度是否与 wells 数量一致 +- 如有问题,在 footer 中添加 [WARN: ...] 
标记 +""" + import re import uuid @@ -8,6 +94,28 @@ from typing import Dict, List, Any, Tuple, Optional Json = Dict[str, Any] + +# ==================== 默认配置 ==================== + +# create_resource 节点默认参数 +CREATE_RESOURCE_DEFAULTS = { + "device_id": "/PRCXI", + "parent_template": "/PRCXI/PRCXI_Deck/T{slot}", # {slot} 会被替换为实际的 slot 值 + "class_name": "PRCXI_BioER_96_wellplate", +} + +# 默认液体体积 (uL) +DEFAULT_LIQUID_VOLUME = 1e5 + +# 参数重命名映射:单数 -> 复数(用于 transfer_liquid 等动作) +PARAM_RENAME_MAPPING = { + "asp_vol": "asp_vols", + "dis_vol": "dis_vols", + "asp_flow_rate": "asp_flow_rates", + "dis_flow_rate": "dis_flow_rates", +} + + # ---------------- Graph ---------------- @@ -228,7 +336,7 @@ def refactor_data( def build_protocol_graph( - labware_info: List[Dict[str, Any]], + labware_info: Dict[str, Dict[str, Any]], protocol_steps: List[Dict[str, Any]], workstation_name: str, action_resource_mapping: Optional[Dict[str, str]] = None, @@ -236,112 +344,227 @@ def build_protocol_graph( """统一的协议图构建函数,根据设备类型自动选择构建逻辑 Args: - labware_info: labware 信息字典 + labware_info: labware 信息字典,格式为 {name: {slot, well, labware, ...}, ...} protocol_steps: 协议步骤列表 workstation_name: 工作站名称 action_resource_mapping: action 到 resource_name 的映射字典,可选 """ G = WorkflowGraph() - resource_last_writer = {} + resource_last_writer = {} # reagent_name -> "node_id:port" + slot_to_create_resource = {} # slot -> create_resource node_id protocol_steps = refactor_data(protocol_steps, action_resource_mapping) - # 有机化学&移液站协议图构建 - WORKSTATION_ID = workstation_name - # 为所有labware创建资源节点 - res_index = 0 + # ==================== 第一步:按 slot 去重创建 create_resource 节点 ==================== + # 收集所有唯一的 slot + slots_info = {} # slot -> {labware, res_id} for labware_id, item in labware_info.items(): - # item_id = item.get("id") or item.get("name", f"item_{uuid.uuid4()}") - node_id = str(uuid.uuid4()) + slot = str(item.get("slot", "")) + if slot and slot not in slots_info: + res_id = f"plate_slot_{slot}" + slots_info[slot] = { + "labware": item.get("labware", ""), + "res_id": res_id, + } - # 判断节点类型 - if "Rack" in str(labware_id) or "Tip" in str(labware_id): - lab_node_type = "Labware" - description = f"Prepare Labware: {labware_id}" - liquid_type = [] - liquid_volume = [] - elif item.get("type") == "hardware" or "reactor" in str(labware_id).lower(): - if "reactor" not in str(labware_id).lower(): - continue - lab_node_type = "Sample" - description = f"Prepare Reactor: {labware_id}" - liquid_type = [] - liquid_volume = [] - else: - lab_node_type = "Reagent" - description = f"Add Reagent to Flask: {labware_id}" - liquid_type = [labware_id] - liquid_volume = [1e5] + # 为每个唯一的 slot 创建 create_resource 节点 + res_index = 0 + last_create_resource_id = None + for slot, info in slots_info.items(): + node_id = str(uuid.uuid4()) + res_id = info["res_id"] res_index += 1 G.add_node( node_id, template_name="create_resource", resource_name="host_node", - name=f"Res {res_index}", - description=description, - lab_node_type=lab_node_type, + name=f"Plate {res_index}", + description=f"Create plate on slot {slot}", + lab_node_type="Labware", footer="create_resource-host_node", param={ - "res_id": labware_id, - "device_id": WORKSTATION_ID, - "class_name": "container", - "parent": WORKSTATION_ID, + "res_id": res_id, + "device_id": CREATE_RESOURCE_DEFAULTS["device_id"], + "class_name": CREATE_RESOURCE_DEFAULTS["class_name"], + "parent": CREATE_RESOURCE_DEFAULTS["parent_template"].format(slot=slot), "bind_locations": {"x": 0.0, "y": 0.0, "z": 0.0}, - "liquid_input_slot": [-1], - 
"liquid_type": liquid_type, - "liquid_volume": liquid_volume, - "slot_on_deck": "", + "slot_on_deck": slot, }, ) - resource_last_writer[labware_id] = f"{node_id}:labware" + slot_to_create_resource[slot] = node_id - last_control_node_id = None + # create_resource 之间通过 ready 串联 + if last_create_resource_id is not None: + G.add_edge(last_create_resource_id, node_id, source_port="ready", target_port="ready") + last_create_resource_id = node_id + + # ==================== 第二步:为每个 reagent 创建 set_liquid_from_plate 节点 ==================== + set_liquid_index = 0 + last_set_liquid_id = last_create_resource_id # set_liquid_from_plate 连接在 create_resource 之后 + + for labware_id, item in labware_info.items(): + # 跳过 Tip/Rack 类型 + if "Rack" in str(labware_id) or "Tip" in str(labware_id): + continue + if item.get("type") == "hardware": + continue + + slot = str(item.get("slot", "")) + wells = item.get("well", []) + if not wells or not slot: + continue + + # res_id 不能有空格 + res_id = str(labware_id).replace(" ", "_") + well_count = len(wells) + + node_id = str(uuid.uuid4()) + set_liquid_index += 1 + + G.add_node( + node_id, + template_name="set_liquid_from_plate", + resource_name="liquid_handler.prcxi", + name=f"SetLiquid {set_liquid_index}", + description=f"Set liquid: {labware_id}", + lab_node_type="Reagent", + footer="set_liquid_from_plate-liquid_handler.prcxi", + param={ + "plate": [], # 通过连接传递 + "well_names": wells, # 孔位名数组,如 ["A1", "A3", "A5"] + "liquid_names": [res_id] * well_count, + "volumes": [DEFAULT_LIQUID_VOLUME] * well_count, + }, + ) + + # ready 连接:上一个节点 -> set_liquid_from_plate + if last_set_liquid_id is not None: + G.add_edge(last_set_liquid_id, node_id, source_port="ready", target_port="ready") + last_set_liquid_id = node_id + + # 物料流:create_resource 的 labware -> set_liquid_from_plate 的 input_plate + create_res_node_id = slot_to_create_resource.get(slot) + if create_res_node_id: + G.add_edge(create_res_node_id, node_id, source_port="labware", target_port="input_plate") + + # set_liquid_from_plate 的输出 output_wells 用于连接 transfer_liquid + resource_last_writer[labware_id] = f"{node_id}:output_wells" + + last_control_node_id = last_set_liquid_id + + # 端口名称映射:JSON 字段名 -> 实际 handle key + INPUT_PORT_MAPPING = { + "sources": "sources_identifier", + "targets": "targets_identifier", + "vessel": "vessel", + "to_vessel": "to_vessel", + "from_vessel": "from_vessel", + "reagent": "reagent", + "solvent": "solvent", + "compound": "compound", + } + + OUTPUT_PORT_MAPPING = { + "sources": "sources_out", # 输出端口是 xxx_out + "targets": "targets_out", # 输出端口是 xxx_out + "vessel": "vessel_out", + "to_vessel": "to_vessel_out", + "from_vessel": "from_vessel_out", + "filtrate_vessel": "filtrate_out", + "reagent": "reagent", + "solvent": "solvent", + "compound": "compound", + } + + # 需要根据 wells 数量扩展的参数列表(复数形式) + EXPAND_BY_WELLS_PARAMS = ["asp_vols", "dis_vols", "asp_flow_rates", "dis_flow_rates"] # 处理协议步骤 for step in protocol_steps: node_id = str(uuid.uuid4()) - G.add_node(node_id, **step) + params = step.get("param", {}).copy() # 复制一份,避免修改原数据 + connected_params = set() # 记录被连接的参数 + warnings = [] # 收集警告信息 + + # 参数重命名:单数 -> 复数 + for old_name, new_name in PARAM_RENAME_MAPPING.items(): + if old_name in params: + params[new_name] = params.pop(old_name) + + # 处理输入连接 + for param_key, target_port in INPUT_PORT_MAPPING.items(): + resource_name = params.get(param_key) + if resource_name and resource_name in resource_last_writer: + source_node, source_port = resource_last_writer[resource_name].split(":") + G.add_edge(source_node, 
node_id, source_port=source_port, target_port=target_port) + connected_params.add(param_key) + elif resource_name and resource_name not in resource_last_writer: + # 资源名在 labware_info 中不存在 + warnings.append(f"{param_key}={resource_name} 未找到") + + # 获取 targets 对应的 wells 数量,用于扩展参数 + targets_name = params.get("targets") + sources_name = params.get("sources") + targets_wells_count = 1 + sources_wells_count = 1 + + if targets_name and targets_name in labware_info: + target_wells = labware_info[targets_name].get("well", []) + targets_wells_count = len(target_wells) if target_wells else 1 + elif targets_name: + warnings.append(f"targets={targets_name} 未在 reagent 中定义") + + if sources_name and sources_name in labware_info: + source_wells = labware_info[sources_name].get("well", []) + sources_wells_count = len(source_wells) if source_wells else 1 + elif sources_name: + warnings.append(f"sources={sources_name} 未在 reagent 中定义") + + # 检查 sources 和 targets 的 wells 数量是否匹配 + if targets_wells_count != sources_wells_count and targets_name and sources_name: + warnings.append(f"wells 数量不匹配: sources={sources_wells_count}, targets={targets_wells_count}") + + # 使用 targets 的 wells 数量来扩展参数 + wells_count = targets_wells_count + + # 扩展单值参数为数组(根据 targets 的 wells 数量) + for expand_param in EXPAND_BY_WELLS_PARAMS: + if expand_param in params: + value = params[expand_param] + # 如果是单个值,扩展为数组 + if not isinstance(value, list): + params[expand_param] = [value] * wells_count + # 如果已经是数组但长度不对,记录警告 + elif len(value) != wells_count: + warnings.append(f"{expand_param} 数量({len(value)})与 wells({wells_count})不匹配") + + # 如果 sources/targets 已通过连接传递,将参数值改为空数组 + for param_key in connected_params: + if param_key in params: + params[param_key] = [] + + # 更新 step 的 param 和 footer + step_copy = step.copy() + step_copy["param"] = params + + # 如果有警告,修改 footer 添加警告标记(警告放前面) + if warnings: + original_footer = step.get("footer", "") + step_copy["footer"] = f"[WARN: {'; '.join(warnings)}] {original_footer}" + + G.add_node(node_id, **step_copy) # 控制流 if last_control_node_id is not None: G.add_edge(last_control_node_id, node_id, source_port="ready", target_port="ready") last_control_node_id = node_id - # 物料流 - params = step.get("param", {}) - input_resources_possible_names = [ - "vessel", - "to_vessel", - "from_vessel", - "reagent", - "solvent", - "compound", - "sources", - "targets", - ] - - for target_port in input_resources_possible_names: - resource_name = params.get(target_port) - if resource_name and resource_name in resource_last_writer: - source_node, source_port = resource_last_writer[resource_name].split(":") - G.add_edge(source_node, node_id, source_port=source_port, target_port=target_port) - - output_resources = { - "vessel_out": params.get("vessel"), - "from_vessel_out": params.get("from_vessel"), - "to_vessel_out": params.get("to_vessel"), - "filtrate_out": params.get("filtrate_vessel"), - "reagent": params.get("reagent"), - "solvent": params.get("solvent"), - "compound": params.get("compound"), - "sources_out": params.get("sources"), - "targets_out": params.get("targets"), - } - - for source_port, resource_name in output_resources.items(): + # 处理输出:更新 resource_last_writer + for param_key, output_port in OUTPUT_PORT_MAPPING.items(): + resource_name = step.get("param", {}).get(param_key) # 使用原始参数值 if resource_name: - resource_last_writer[resource_name] = f"{node_id}:{source_port}" + resource_last_writer[resource_name] = f"{node_id}:{output_port}" return G diff --git a/unilabos/workflow/convert_from_json.py 
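Together, `PARAM_RENAME_MAPPING` and `EXPAND_BY_WELLS_PARAMS` turn the scalar per-step volumes from the JSON into per-well arrays sized to the target labware. A self-contained re-run of that pass on a single made-up `transfer_liquid` step:

```python
# Rename singular keys, then broadcast scalars to the target wells count.
PARAM_RENAME_MAPPING = {"asp_vol": "asp_vols", "dis_vol": "dis_vols"}
EXPAND_BY_WELLS_PARAMS = ["asp_vols", "dis_vols"]

params = {"asp_vol": 50, "dis_vol": 50, "targets": "sample_plate"}
wells_count = 3  # len(labware_info["sample_plate"]["well"]) in the real code

for old, new in PARAM_RENAME_MAPPING.items():
    if old in params:
        params[new] = params.pop(old)

for key in EXPAND_BY_WELLS_PARAMS:
    if key in params and not isinstance(params[key], list):
        params[key] = [params[key]] * wells_count

print(params)
# {'targets': 'sample_plate', 'asp_vols': [50, 50, 50], 'dis_vols': [50, 50, 50]}
```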
b/unilabos/workflow/convert_from_json.py index 7a6d2b4..ff749d7 100644 --- a/unilabos/workflow/convert_from_json.py +++ b/unilabos/workflow/convert_from_json.py @@ -1,21 +1,68 @@ """ JSON 工作流转换模块 -提供从多种 JSON 格式转换为统一工作流格式的功能。 -支持的格式: -1. workflow/reagent 格式 -2. steps_info/labware_info 格式 +将 workflow/reagent 格式的 JSON 转换为统一工作流格式。 + +输入格式: +{ + "workflow": [ + {"action": "...", "action_args": {...}}, + ... + ], + "reagent": { + "reagent_name": {"slot": int, "well": [...], "labware": "..."}, + ... + } +} """ import json from os import PathLike from pathlib import Path -from typing import Any, Dict, List, Optional, Set, Tuple, Union +from typing import Any, Dict, List, Optional, Tuple, Union from unilabos.workflow.common import WorkflowGraph, build_protocol_graph from unilabos.registry.registry import lab_registry +# ==================== 字段映射配置 ==================== + +# action 到 resource_name 的映射 +ACTION_RESOURCE_MAPPING: Dict[str, str] = { + # 生物实验操作 + "transfer_liquid": "liquid_handler.prcxi", + "transfer": "liquid_handler.prcxi", + "incubation": "incubator.prcxi", + "move_labware": "labware_mover.prcxi", + "oscillation": "shaker.prcxi", + # 有机化学操作 + "HeatChillToTemp": "heatchill.chemputer", + "StopHeatChill": "heatchill.chemputer", + "StartHeatChill": "heatchill.chemputer", + "HeatChill": "heatchill.chemputer", + "Dissolve": "stirrer.chemputer", + "Transfer": "liquid_handler.chemputer", + "Evaporate": "rotavap.chemputer", + "Recrystallize": "reactor.chemputer", + "Filter": "filter.chemputer", + "Dry": "dryer.chemputer", + "Add": "liquid_handler.chemputer", +} + +# action_args 字段到 parameters 字段的映射 +# 格式: {"old_key": "new_key"}, 仅映射需要重命名的字段 +ARGS_FIELD_MAPPING: Dict[str, str] = { + # 如果需要字段重命名,在这里配置 + # "old_field_name": "new_field_name", +} + +# 默认工作站名称 +DEFAULT_WORKSTATION = "PRCXI" + + +# ==================== 核心转换函数 ==================== + + def get_action_handles(resource_name: str, template_name: str) -> Dict[str, List[str]]: """ 从 registry 获取指定设备和动作的 handles 配置 @@ -39,12 +86,10 @@ def get_action_handles(resource_name: str, template_name: str) -> Dict[str, List handles = action_config.get("handles", {}) if isinstance(handles, dict): - # 处理 input handles (作为 target) for handle in handles.get("input", []): handler_key = handle.get("handler_key", "") if handler_key: result["source"].append(handler_key) - # 处理 output handles (作为 source) for handle in handles.get("output", []): handler_key = handle.get("handler_key", "") if handler_key: @@ -69,12 +114,9 @@ def validate_workflow_handles(graph: WorkflowGraph) -> Tuple[bool, List[str]]: for edge in graph.edges: left_uuid = edge.get("source") right_uuid = edge.get("target") - # target_handle_key是target, right的输入节点(入节点) - # source_handle_key是source, left的输出节点(出节点) right_source_conn_key = edge.get("target_handle_key", "") left_target_conn_key = edge.get("source_handle_key", "") - # 获取源节点和目标节点信息 left_node = nodes.get(left_uuid, {}) right_node = nodes.get(right_uuid, {}) @@ -83,164 +125,93 @@ def validate_workflow_handles(graph: WorkflowGraph) -> Tuple[bool, List[str]]: right_res_name = right_node.get("resource_name", "") right_template_name = right_node.get("template_name", "") - # 获取源节点的 output handles left_node_handles = get_action_handles(left_res_name, left_template_name) target_valid_keys = left_node_handles.get("target", []) target_valid_keys.append("ready") - # 获取目标节点的 input handles right_node_handles = get_action_handles(right_res_name, right_template_name) source_valid_keys = right_node_handles.get("source", []) 
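`ACTION_RESOURCE_MAPPING` is what gives each JSON step its `resource_name`, and hence its `footer` of the form `template_name-resource_name`. A small lookup example; the fallback for actions missing from the mapping is an assumption here, not something fixed by this file:

```python
ACTION_RESOURCE_MAPPING = {
    "transfer_liquid": "liquid_handler.prcxi",
    "incubation": "incubator.prcxi",
    "HeatChillToTemp": "heatchill.chemputer",
}

for action in ("transfer_liquid", "incubation", "Unknown_Action"):
    resource = ACTION_RESOURCE_MAPPING.get(action, "host_node")  # fallback is hypothetical
    print(f"{action:>16} -> {resource}  (footer: {action}-{resource})")
```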
source_valid_keys.append("ready") - # 如果节点配置了 output handles,则 source_port 必须有效 + # 验证目标节点(right)的输入端口 if not right_source_conn_key: - node_name = left_node.get("name", left_uuid[:8]) - errors.append(f"源节点 '{node_name}' 的 source_handle_key 为空," f"应设置为: {source_valid_keys}") + node_name = right_node.get("name", right_uuid[:8]) + errors.append(f"目标节点 '{node_name}' 的输入端口 (target_handle_key) 为空,应设置为: {source_valid_keys}") elif right_source_conn_key not in source_valid_keys: - node_name = left_node.get("name", left_uuid[:8]) + node_name = right_node.get("name", right_uuid[:8]) errors.append( - f"源节点 '{node_name}' 的 source 端点 '{right_source_conn_key}' 不存在," f"支持的端点: {source_valid_keys}" + f"目标节点 '{node_name}' 的输入端口 '{right_source_conn_key}' 不存在,支持的输入端口: {source_valid_keys}" ) - # 如果节点配置了 input handles,则 target_port 必须有效 + # 验证源节点(left)的输出端口 if not left_target_conn_key: - node_name = right_node.get("name", right_uuid[:8]) - errors.append(f"目标节点 '{node_name}' 的 target_handle_key 为空," f"应设置为: {target_valid_keys}") + node_name = left_node.get("name", left_uuid[:8]) + errors.append(f"源节点 '{node_name}' 的输出端口 (source_handle_key) 为空,应设置为: {target_valid_keys}") elif left_target_conn_key not in target_valid_keys: - node_name = right_node.get("name", right_uuid[:8]) + node_name = left_node.get("name", left_uuid[:8]) errors.append( - f"目标节点 '{node_name}' 的 target 端点 '{left_target_conn_key}' 不存在," - f"支持的端点: {target_valid_keys}" + f"源节点 '{node_name}' 的输出端口 '{left_target_conn_key}' 不存在,支持的输出端口: {target_valid_keys}" ) return len(errors) == 0, errors -# action 到 resource_name 的映射 -ACTION_RESOURCE_MAPPING: Dict[str, str] = { - # 生物实验操作 - "transfer_liquid": "liquid_handler.prcxi", - "transfer": "liquid_handler.prcxi", - "incubation": "incubator.prcxi", - "move_labware": "labware_mover.prcxi", - "oscillation": "shaker.prcxi", - # 有机化学操作 - "HeatChillToTemp": "heatchill.chemputer", - "StopHeatChill": "heatchill.chemputer", - "StartHeatChill": "heatchill.chemputer", - "HeatChill": "heatchill.chemputer", - "Dissolve": "stirrer.chemputer", - "Transfer": "liquid_handler.chemputer", - "Evaporate": "rotavap.chemputer", - "Recrystallize": "reactor.chemputer", - "Filter": "filter.chemputer", - "Dry": "dryer.chemputer", - "Add": "liquid_handler.chemputer", -} - - -def normalize_steps(data: List[Dict[str, Any]]) -> List[Dict[str, Any]]: +def normalize_workflow_steps(workflow: List[Dict[str, Any]]) -> List[Dict[str, Any]]: """ - 将不同格式的步骤数据规范化为统一格式 + 将 workflow 格式的步骤数据规范化 - 支持的输入格式: - - action + parameters - - action + action_args - - operation + parameters + 输入格式: + [{"action": "...", "action_args": {...}}, ...] + + 输出格式: + [{"action": "...", "parameters": {...}, "step_number": int}, ...] Args: - data: 原始步骤数据列表 + workflow: workflow 数组 Returns: - 规范化后的步骤列表,格式为 [{"action": str, "parameters": dict, "description": str?, "step_number": int?}, ...] 
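Each edge is checked against the handle keys advertised by its two endpoint nodes, with `ready` always accepted on either side. A simplified offline sketch of that per-edge check, using hard-coded handle lists in place of the `lab_registry` lookup (the port names are examples, not authoritative):

```python
# Valid ports for a hypothetical edge: outputs of the source node, inputs of the target node.
source_valid = ["sources_out", "targets_out", "ready"]
target_valid = ["sources_identifier", "targets_identifier", "ready"]

edge = {
    "source": "node-aaa", "target": "node-bbb",
    "source_handle_key": "targets_out", "target_handle_key": "sources_identifier",
}

errors = []
if edge["source_handle_key"] not in source_valid:
    errors.append(f"invalid output port {edge['source_handle_key']!r}")
if edge["target_handle_key"] not in target_valid:
    errors.append(f"invalid input port {edge['target_handle_key']!r}")
print(errors or "edge ports OK")
```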
+ 规范化后的步骤列表 """ normalized = [] - for idx, step in enumerate(data): - # 获取动作名称(支持 action 或 operation 字段) - action = step.get("action") or step.get("operation") + for idx, step in enumerate(workflow): + action = step.get("action") if not action: continue - # 获取参数(支持 parameters 或 action_args 字段) - raw_params = step.get("parameters") or step.get("action_args") or {} - params = dict(raw_params) + # 获取参数: action_args + raw_params = step.get("action_args", {}) + params = {} - # 规范化 source/target -> sources/targets - if "source" in raw_params and "sources" not in raw_params: - params["sources"] = raw_params["source"] - if "target" in raw_params and "targets" not in raw_params: - params["targets"] = raw_params["target"] + # 应用字段映射 + for key, value in raw_params.items(): + mapped_key = ARGS_FIELD_MAPPING.get(key, key) + params[mapped_key] = value - # 获取描述(支持 description 或 purpose 字段) - description = step.get("description") or step.get("purpose") + step_dict = { + "action": action, + "parameters": params, + "step_number": idx + 1, + } - # 获取步骤编号(优先使用原始数据中的 step_number,否则使用索引+1) - step_number = step.get("step_number", idx + 1) - - step_dict = {"action": action, "parameters": params, "step_number": step_number} - if description: - step_dict["description"] = description + # 保留描述字段 + if "description" in step: + step_dict["description"] = step["description"] normalized.append(step_dict) return normalized -def normalize_labware(data: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]: - """ - 将不同格式的 labware 数据规范化为统一的字典格式 - - 支持的输入格式: - - reagent_name + material_name + positions - - name + labware + slot - - Args: - data: 原始 labware 数据列表 - - Returns: - 规范化后的 labware 字典,格式为 {name: {"slot": int, "labware": str, "well": list, "type": str, "role": str, "name": str}, ...} - """ - labware = {} - for item in data: - # 获取 key 名称(优先使用 reagent_name,其次是 material_name 或 name) - reagent_name = item.get("reagent_name") - key = reagent_name or item.get("material_name") or item.get("name") - if not key: - continue - - key = str(key) - - # 处理重复 key,自动添加后缀 - idx = 1 - original_key = key - while key in labware: - idx += 1 - key = f"{original_key}_{idx}" - - labware[key] = { - "slot": item.get("positions") or item.get("slot"), - "labware": item.get("material_name") or item.get("labware"), - "well": item.get("well", []), - "type": item.get("type", "reagent"), - "role": item.get("role", ""), - "name": key, - } - - return labware - - def convert_from_json( data: Union[str, PathLike, Dict[str, Any]], - workstation_name: str = "PRCXi", + workstation_name: str = DEFAULT_WORKSTATION, validate: bool = True, ) -> WorkflowGraph: """ 从 JSON 数据或文件转换为 WorkflowGraph - 支持的 JSON 格式: - 1. {"workflow": [...], "reagent": {...}} - 直接格式 - 2. 
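A short usage example for `normalize_workflow_steps` (the step contents are invented; with `ARGS_FIELD_MAPPING` left empty, `parameters` is simply a copy of `action_args`):

```python
from unilabos.workflow.convert_from_json import normalize_workflow_steps

workflow = [
    {"action": "transfer_liquid",
     "action_args": {"sources": "buffer_A", "targets": "sample_plate", "asp_vol": 50}},
    {"action": "incubation", "action_args": {"time": 600}, "description": "Incubate 10 min"},
]

steps = normalize_workflow_steps(workflow)
# steps == [
#   {"action": "transfer_liquid",
#    "parameters": {"sources": "buffer_A", "targets": "sample_plate", "asp_vol": 50},
#    "step_number": 1},
#   {"action": "incubation", "parameters": {"time": 600},
#    "step_number": 2, "description": "Incubate 10 min"},
# ]
```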
{"steps_info": [...], "labware_info": [...]} - 需要规范化的格式 + JSON 格式: + {"workflow": [...], "reagent": {...}} Args: data: JSON 文件路径、字典数据、或 JSON 字符串 @@ -251,7 +222,7 @@ def convert_from_json( WorkflowGraph: 构建好的工作流图 Raises: - ValueError: 不支持的 JSON 格式 或 句柄校验失败 + ValueError: 不支持的 JSON 格式 FileNotFoundError: 文件不存在 json.JSONDecodeError: JSON 解析失败 """ @@ -262,7 +233,6 @@ def convert_from_json( with path.open("r", encoding="utf-8") as fp: json_data = json.load(fp) elif isinstance(data, str): - # 尝试作为 JSON 字符串解析 json_data = json.loads(data) else: raise FileNotFoundError(f"文件不存在: {data}") @@ -271,30 +241,24 @@ def convert_from_json( else: raise TypeError(f"不支持的数据类型: {type(data)}") - # 根据格式解析数据 - if "workflow" in json_data and "reagent" in json_data: - # 格式1: workflow/reagent(已经是规范格式) - protocol_steps = json_data["workflow"] - labware_info = json_data["reagent"] - elif "steps_info" in json_data and "labware_info" in json_data: - # 格式2: steps_info/labware_info(需要规范化) - protocol_steps = normalize_steps(json_data["steps_info"]) - labware_info = normalize_labware(json_data["labware_info"]) - elif "steps" in json_data and "labware" in json_data: - # 格式3: steps/labware(另一种常见格式) - protocol_steps = normalize_steps(json_data["steps"]) - if isinstance(json_data["labware"], list): - labware_info = normalize_labware(json_data["labware"]) - else: - labware_info = json_data["labware"] - else: + # 校验格式 + if "workflow" not in json_data or "reagent" not in json_data: raise ValueError( - "不支持的 JSON 格式。支持的格式:\n" - "1. {'workflow': [...], 'reagent': {...}}\n" - "2. {'steps_info': [...], 'labware_info': [...]}\n" - "3. {'steps': [...], 'labware': [...]}" + "不支持的 JSON 格式。请使用标准格式:\n" + '{"workflow": [{"action": "...", "action_args": {...}}, ...], ' + '"reagent": {"name": {"slot": int, "well": [...], "labware": "..."}, ...}}' ) + # 提取数据 + workflow = json_data["workflow"] + reagent = json_data["reagent"] + + # 规范化步骤数据 + protocol_steps = normalize_workflow_steps(workflow) + + # reagent 已经是字典格式,直接使用 + labware_info = reagent + # 构建工作流图 graph = build_protocol_graph( labware_info=labware_info, @@ -317,7 +281,7 @@ def convert_from_json( def convert_json_to_node_link( data: Union[str, PathLike, Dict[str, Any]], - workstation_name: str = "PRCXi", + workstation_name: str = DEFAULT_WORKSTATION, ) -> Dict[str, Any]: """ 将 JSON 数据转换为 node-link 格式的字典 @@ -335,7 +299,7 @@ def convert_json_to_node_link( def convert_json_to_workflow_list( data: Union[str, PathLike, Dict[str, Any]], - workstation_name: str = "PRCXi", + workstation_name: str = DEFAULT_WORKSTATION, ) -> List[Dict[str, Any]]: """ 将 JSON 数据转换为工作流列表格式 @@ -349,8 +313,3 @@ def convert_json_to_workflow_list( """ graph = convert_from_json(data, workstation_name) return graph.to_dict() - - -# 为了向后兼容,保留下划线前缀的别名 -_normalize_steps = normalize_steps -_normalize_labware = normalize_labware diff --git a/unilabos/workflow/legacy/convert_from_json_legacy.py b/unilabos/workflow/legacy/convert_from_json_legacy.py new file mode 100644 index 0000000..7a6d2b4 --- /dev/null +++ b/unilabos/workflow/legacy/convert_from_json_legacy.py @@ -0,0 +1,356 @@ +""" +JSON 工作流转换模块 + +提供从多种 JSON 格式转换为统一工作流格式的功能。 +支持的格式: +1. workflow/reagent 格式 +2. 
steps_info/labware_info 格式 +""" + +import json +from os import PathLike +from pathlib import Path +from typing import Any, Dict, List, Optional, Set, Tuple, Union + +from unilabos.workflow.common import WorkflowGraph, build_protocol_graph +from unilabos.registry.registry import lab_registry + + +def get_action_handles(resource_name: str, template_name: str) -> Dict[str, List[str]]: + """ + 从 registry 获取指定设备和动作的 handles 配置 + + Args: + resource_name: 设备资源名称,如 "liquid_handler.prcxi" + template_name: 动作模板名称,如 "transfer_liquid" + + Returns: + 包含 source 和 target handler_keys 的字典: + {"source": ["sources_out", "targets_out", ...], "target": ["sources", "targets", ...]} + """ + result = {"source": [], "target": []} + + device_info = lab_registry.device_type_registry.get(resource_name, {}) + if not device_info: + return result + + action_mappings = device_info.get("class", {}).get("action_value_mappings", {}) + action_config = action_mappings.get(template_name, {}) + handles = action_config.get("handles", {}) + + if isinstance(handles, dict): + # 处理 input handles (作为 target) + for handle in handles.get("input", []): + handler_key = handle.get("handler_key", "") + if handler_key: + result["source"].append(handler_key) + # 处理 output handles (作为 source) + for handle in handles.get("output", []): + handler_key = handle.get("handler_key", "") + if handler_key: + result["target"].append(handler_key) + + return result + + +def validate_workflow_handles(graph: WorkflowGraph) -> Tuple[bool, List[str]]: + """ + 校验工作流图中所有边的句柄配置是否正确 + + Args: + graph: 工作流图对象 + + Returns: + (is_valid, errors): 是否有效,错误信息列表 + """ + errors = [] + nodes = graph.nodes + + for edge in graph.edges: + left_uuid = edge.get("source") + right_uuid = edge.get("target") + # target_handle_key是target, right的输入节点(入节点) + # source_handle_key是source, left的输出节点(出节点) + right_source_conn_key = edge.get("target_handle_key", "") + left_target_conn_key = edge.get("source_handle_key", "") + + # 获取源节点和目标节点信息 + left_node = nodes.get(left_uuid, {}) + right_node = nodes.get(right_uuid, {}) + + left_res_name = left_node.get("resource_name", "") + left_template_name = left_node.get("template_name", "") + right_res_name = right_node.get("resource_name", "") + right_template_name = right_node.get("template_name", "") + + # 获取源节点的 output handles + left_node_handles = get_action_handles(left_res_name, left_template_name) + target_valid_keys = left_node_handles.get("target", []) + target_valid_keys.append("ready") + + # 获取目标节点的 input handles + right_node_handles = get_action_handles(right_res_name, right_template_name) + source_valid_keys = right_node_handles.get("source", []) + source_valid_keys.append("ready") + + # 如果节点配置了 output handles,则 source_port 必须有效 + if not right_source_conn_key: + node_name = left_node.get("name", left_uuid[:8]) + errors.append(f"源节点 '{node_name}' 的 source_handle_key 为空," f"应设置为: {source_valid_keys}") + elif right_source_conn_key not in source_valid_keys: + node_name = left_node.get("name", left_uuid[:8]) + errors.append( + f"源节点 '{node_name}' 的 source 端点 '{right_source_conn_key}' 不存在," f"支持的端点: {source_valid_keys}" + ) + + # 如果节点配置了 input handles,则 target_port 必须有效 + if not left_target_conn_key: + node_name = right_node.get("name", right_uuid[:8]) + errors.append(f"目标节点 '{node_name}' 的 target_handle_key 为空," f"应设置为: {target_valid_keys}") + elif left_target_conn_key not in target_valid_keys: + node_name = right_node.get("name", right_uuid[:8]) + errors.append( + f"目标节点 '{node_name}' 的 target 端点 '{left_target_conn_key}' 不存在," + f"支持的端点: 
{target_valid_keys}" + ) + + return len(errors) == 0, errors + + +# action 到 resource_name 的映射 +ACTION_RESOURCE_MAPPING: Dict[str, str] = { + # 生物实验操作 + "transfer_liquid": "liquid_handler.prcxi", + "transfer": "liquid_handler.prcxi", + "incubation": "incubator.prcxi", + "move_labware": "labware_mover.prcxi", + "oscillation": "shaker.prcxi", + # 有机化学操作 + "HeatChillToTemp": "heatchill.chemputer", + "StopHeatChill": "heatchill.chemputer", + "StartHeatChill": "heatchill.chemputer", + "HeatChill": "heatchill.chemputer", + "Dissolve": "stirrer.chemputer", + "Transfer": "liquid_handler.chemputer", + "Evaporate": "rotavap.chemputer", + "Recrystallize": "reactor.chemputer", + "Filter": "filter.chemputer", + "Dry": "dryer.chemputer", + "Add": "liquid_handler.chemputer", +} + + +def normalize_steps(data: List[Dict[str, Any]]) -> List[Dict[str, Any]]: + """ + 将不同格式的步骤数据规范化为统一格式 + + 支持的输入格式: + - action + parameters + - action + action_args + - operation + parameters + + Args: + data: 原始步骤数据列表 + + Returns: + 规范化后的步骤列表,格式为 [{"action": str, "parameters": dict, "description": str?, "step_number": int?}, ...] + """ + normalized = [] + for idx, step in enumerate(data): + # 获取动作名称(支持 action 或 operation 字段) + action = step.get("action") or step.get("operation") + if not action: + continue + + # 获取参数(支持 parameters 或 action_args 字段) + raw_params = step.get("parameters") or step.get("action_args") or {} + params = dict(raw_params) + + # 规范化 source/target -> sources/targets + if "source" in raw_params and "sources" not in raw_params: + params["sources"] = raw_params["source"] + if "target" in raw_params and "targets" not in raw_params: + params["targets"] = raw_params["target"] + + # 获取描述(支持 description 或 purpose 字段) + description = step.get("description") or step.get("purpose") + + # 获取步骤编号(优先使用原始数据中的 step_number,否则使用索引+1) + step_number = step.get("step_number", idx + 1) + + step_dict = {"action": action, "parameters": params, "step_number": step_number} + if description: + step_dict["description"] = description + + normalized.append(step_dict) + + return normalized + + +def normalize_labware(data: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]: + """ + 将不同格式的 labware 数据规范化为统一的字典格式 + + 支持的输入格式: + - reagent_name + material_name + positions + - name + labware + slot + + Args: + data: 原始 labware 数据列表 + + Returns: + 规范化后的 labware 字典,格式为 {name: {"slot": int, "labware": str, "well": list, "type": str, "role": str, "name": str}, ...} + """ + labware = {} + for item in data: + # 获取 key 名称(优先使用 reagent_name,其次是 material_name 或 name) + reagent_name = item.get("reagent_name") + key = reagent_name or item.get("material_name") or item.get("name") + if not key: + continue + + key = str(key) + + # 处理重复 key,自动添加后缀 + idx = 1 + original_key = key + while key in labware: + idx += 1 + key = f"{original_key}_{idx}" + + labware[key] = { + "slot": item.get("positions") or item.get("slot"), + "labware": item.get("material_name") or item.get("labware"), + "well": item.get("well", []), + "type": item.get("type", "reagent"), + "role": item.get("role", ""), + "name": key, + } + + return labware + + +def convert_from_json( + data: Union[str, PathLike, Dict[str, Any]], + workstation_name: str = "PRCXi", + validate: bool = True, +) -> WorkflowGraph: + """ + 从 JSON 数据或文件转换为 WorkflowGraph + + 支持的 JSON 格式: + 1. {"workflow": [...], "reagent": {...}} - 直接格式 + 2. 
{"steps_info": [...], "labware_info": [...]} - 需要规范化的格式 + + Args: + data: JSON 文件路径、字典数据、或 JSON 字符串 + workstation_name: 工作站名称,默认 "PRCXi" + validate: 是否校验句柄配置,默认 True + + Returns: + WorkflowGraph: 构建好的工作流图 + + Raises: + ValueError: 不支持的 JSON 格式 或 句柄校验失败 + FileNotFoundError: 文件不存在 + json.JSONDecodeError: JSON 解析失败 + """ + # 处理输入数据 + if isinstance(data, (str, PathLike)): + path = Path(data) + if path.exists(): + with path.open("r", encoding="utf-8") as fp: + json_data = json.load(fp) + elif isinstance(data, str): + # 尝试作为 JSON 字符串解析 + json_data = json.loads(data) + else: + raise FileNotFoundError(f"文件不存在: {data}") + elif isinstance(data, dict): + json_data = data + else: + raise TypeError(f"不支持的数据类型: {type(data)}") + + # 根据格式解析数据 + if "workflow" in json_data and "reagent" in json_data: + # 格式1: workflow/reagent(已经是规范格式) + protocol_steps = json_data["workflow"] + labware_info = json_data["reagent"] + elif "steps_info" in json_data and "labware_info" in json_data: + # 格式2: steps_info/labware_info(需要规范化) + protocol_steps = normalize_steps(json_data["steps_info"]) + labware_info = normalize_labware(json_data["labware_info"]) + elif "steps" in json_data and "labware" in json_data: + # 格式3: steps/labware(另一种常见格式) + protocol_steps = normalize_steps(json_data["steps"]) + if isinstance(json_data["labware"], list): + labware_info = normalize_labware(json_data["labware"]) + else: + labware_info = json_data["labware"] + else: + raise ValueError( + "不支持的 JSON 格式。支持的格式:\n" + "1. {'workflow': [...], 'reagent': {...}}\n" + "2. {'steps_info': [...], 'labware_info': [...]}\n" + "3. {'steps': [...], 'labware': [...]}" + ) + + # 构建工作流图 + graph = build_protocol_graph( + labware_info=labware_info, + protocol_steps=protocol_steps, + workstation_name=workstation_name, + action_resource_mapping=ACTION_RESOURCE_MAPPING, + ) + + # 校验句柄配置 + if validate: + is_valid, errors = validate_workflow_handles(graph) + if not is_valid: + import warnings + + for error in errors: + warnings.warn(f"句柄校验警告: {error}") + + return graph + + +def convert_json_to_node_link( + data: Union[str, PathLike, Dict[str, Any]], + workstation_name: str = "PRCXi", +) -> Dict[str, Any]: + """ + 将 JSON 数据转换为 node-link 格式的字典 + + Args: + data: JSON 文件路径、字典数据、或 JSON 字符串 + workstation_name: 工作站名称,默认 "PRCXi" + + Returns: + Dict: node-link 格式的工作流数据 + """ + graph = convert_from_json(data, workstation_name) + return graph.to_node_link_dict() + + +def convert_json_to_workflow_list( + data: Union[str, PathLike, Dict[str, Any]], + workstation_name: str = "PRCXi", +) -> List[Dict[str, Any]]: + """ + 将 JSON 数据转换为工作流列表格式 + + Args: + data: JSON 文件路径、字典数据、或 JSON 字符串 + workstation_name: 工作站名称,默认 "PRCXi" + + Returns: + List: 工作流节点列表 + """ + graph = convert_from_json(data, workstation_name) + return graph.to_dict() + + +# 为了向后兼容,保留下划线前缀的别名 +_normalize_steps = normalize_steps +_normalize_labware = normalize_labware diff --git a/unilabos_msgs/package.xml b/unilabos_msgs/package.xml index b9c2632..68ad132 100644 --- a/unilabos_msgs/package.xml +++ b/unilabos_msgs/package.xml @@ -2,7 +2,7 @@ unilabos_msgs - 0.10.15 + 0.10.17 ROS2 Messages package for unilabos devices Junhan Chang Xuwznln