<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0"
  xmlns:atom="http://www.w3.org/2005/Atom"
  xmlns:dc="http://purl.org/dc/elements/1.1/">
<channel>
  <title>Pulsar 照見 · VLA 新文章</title>
  <link>https://sou350121.github.io/pulsar-web/vla-deepdive</link>
  <atom:link href="https://sou350121.github.io/pulsar-web/rss/vla-theory.xml" rel="self" type="application/rss+xml" />
  <description>VLA-Handbook 理论章节每日新增文章 — 每日 10:00-17:00 Pulsar 照見自动生成 · 涵盖架构、扩散、世界模型、强化学习、触觉、感知、规划、基础、部署、前沿 10 个主题方向。</description>
  <language>zh-CN</language>
  <ttl>60</ttl>
  <lastBuildDate>Mon, 20 Apr 2026 04:53:39 GMT</lastBuildDate>
  <generator>Pulsar 照見 · https://github.com/sou350121/pulsar-web</generator>
  <copyright>Content under CC BY 4.0 — sou350121</copyright>
  <item>
    <title>DockAnywhere: 通过演示生成提升移动操作数据效率 (DockAnywhere: Data-Efficient Visuomotor Policy Learning for Mobile Manipulation via Novel Demonstration Generation)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/deployment/dockanywhere_data_efficient_visuomotor_policy_learning_for_m_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">0dd204547e8b79e6691e6a55cf867d418bba0def</guid>
    <pubDate>Mon, 20 Apr 2026 00:00:00 GMT</pubDate>

    <category>deployment</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：deployment · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>力场流匹配：从单演示生成力觉数据学习 3D 顺应性策略 (Flow with the Force Field: Learning 3D Compliant Flow Matching Policies from Force and Demonstration-Guided Simulation Data)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/diffusion-flow/flow_with_the_force_field_learning_3d_compliant_flow_matchin_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">582ab8b1285d78829695d5a1dbfff6e9b8626b70</guid>
    <pubDate>Mon, 20 Apr 2026 00:00:00 GMT</pubDate>

    <category>diffusion-flow</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：diffusion-flow · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>X-Diffusion: 跨具身人类演示训练扩散策略 (X-Diffusion: Training Diffusion Policies on Cross-Embodiment Human Demonstrations)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/diffusion-flow/x_diffusion_training_diffusion_policies_on_cross_embodiment_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">f6966b7bec941c41842c7c261ec7856f7ac84cb0</guid>
    <pubDate>Mon, 20 Apr 2026 00:00:00 GMT</pubDate>

    <category>diffusion-flow</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：diffusion-flow · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>潜空间综述：语言模型的&quot;原生思维空间&quot;与具身智能的统一接口</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/foundation/latent_space_survey_foundation_evolution_mechanism_ability_2026.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">66c2e42c7c4296fba44142b386310c43bdc7819e</guid>
    <pubDate>Mon, 20 Apr 2026 00:00:00 GMT</pubDate>

    <category>foundation</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：foundation · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>免微调部署 VLA：即插即用推理时策略引导 (Towards Deploying VLA without Fine-Tuning: Plug-and-Play Inference-Time VLA Policy Steering via Embodied Evolutionary Diffusion)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/foundation/towards_deploying_vla_without_fine_tuning_plug_and_play_infe_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">ed621c37ffd18d8376558390ea09446996ed45a9</guid>
    <pubDate>Mon, 20 Apr 2026 00:00:00 GMT</pubDate>

    <category>foundation</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：foundation · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>VLA 数据工程指南：从采集到训练的完整链路</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/foundation/vla_data_engineering_guide.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">4a9083a8253059735acf0bc2a543f24445532897</guid>
    <pubDate>Mon, 20 Apr 2026 00:00:00 GMT</pubDate>

    <category>foundation</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：foundation · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>cuRoboV2：高自由度机器人的动力学感知运动生成 (cuRoboV2: Dynamics-Aware Motion Generation with Depth-Fused Distance Fields for High-DoF Robots)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/frontier/curobov2_dynamics_aware_motion_generation_with_depth_fused_d_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">e842b8e4559b45843e6786bb9f84cc1e66142d33</guid>
    <pubDate>Mon, 20 Apr 2026 00:00:00 GMT</pubDate>

    <category>frontier</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：frontier · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>人工三元智能：生物启发的物理 AI 传感器优先架构 (Artificial Tripartite Intelligence: A Bio-Inspired, Sensor-First Architecture for Physical AI)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/frontier/emerging_ideas_artificial_tripartite_intelligence_a_bio_insp_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">df1c1c0566f42679981ce84dcd2ce7a83453b4e3</guid>
    <pubDate>Mon, 20 Apr 2026 00:00:00 GMT</pubDate>

    <category>frontier</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：frontier · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>IGen: 从开放世界图像可扩展生成机器人学习数据 (IGen: Scalable Data Generation for Robot Learning from Open-World Images)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/frontier/igen_scalable_data_generation_for_robot_learning_from_open_w_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">f8dd17de2ab58766fee4c0567101df30732fb9dd</guid>
    <pubDate>Mon, 20 Apr 2026 00:00:00 GMT</pubDate>

    <category>frontier</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：frontier · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>分层时空动作分词器：上下文模仿学习的新范式 (HiST-AT: A Hierarchical Spatiotemporal Action Tokenizer for In-Context Imitation Learning)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/planning/a_hierarchical_spatiotemporal_action_tokenizer_for_in_contex_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">ce71bd8a5feed023168f7484c28ab9f59cc7f1f8</guid>
    <pubDate>Mon, 20 Apr 2026 00:00:00 GMT</pubDate>

    <category>planning</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：planning · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>HazardArena：评估 VLA 模型的语义安全 (HazardArena: Evaluating Semantic Safety in Vision-Language-Action Models)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/planning/hazardarena_evaluating_semantic_safety_in_vision_language_action_models_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">0025801bcccc5534b4e408da4ee8f15c8296b25c</guid>
    <pubDate>Mon, 20 Apr 2026 00:00:00 GMT</pubDate>

    <category>planning</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：planning · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>GR00T-N1.7：NVIDIA 的开源通用机器人基础模型——从人形到任意形态</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/vla-core/groot_n1_7_nvidia_open_foundation_model_2026.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">e08438fedb26e35d5a1dab25257656296cb449f3</guid>
    <pubDate>Mon, 20 Apr 2026 00:00:00 GMT</pubDate>

    <category>vla-core</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：vla-core · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>HAMLET：将视觉-语言-动作模型转换为历史感知策略 (HAMLET: Switch your Vision-Language-Action Model into a History-Aware Policy)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/vla-core/hamlet_switch_your_vision_language_action_model_into_a_histo_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">1594b770197dd6210e10afa61da3ab6fe0e6e514</guid>
    <pubDate>Mon, 20 Apr 2026 00:00:00 GMT</pubDate>

    <category>vla-core</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：vla-core · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>LingBot-VLA：20,000 小时真实数据预训练的实用主义 VLA</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/vla-core/lingbot_vla_pragmatic_foundation_model_2026.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">0a4121a5a312dee0e40239cf2be9ad6c1a665753</guid>
    <pubDate>Mon, 20 Apr 2026 00:00:00 GMT</pubDate>

    <category>vla-core</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：vla-core · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>完全开源 VLA 选型指南：谁是真开源，谁在&quot;开源洗&quot;</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/vla-core/open_source_vla_guide.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">dde2f45752bb7fd79c2831df6989e502fd13572e</guid>
    <pubDate>Mon, 20 Apr 2026 00:00:00 GMT</pubDate>

    <category>vla-core</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：vla-core · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>通过多模态策略共识实现多模态操作 (Multi-Modal Manipulation via Multi-Modal Policy Consensus)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/tactile/multi_modal_manipulation_via_multi_modal_policy_consensus_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">f2128b42959788aebc48e0bbc8307fbdee4cf7d5</guid>
    <pubDate>Sun, 19 Apr 2026 00:00:00 GMT</pubDate>

    <category>tactile</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：tactile · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>BLaDA：在 3DGS 场中桥接语言与功能性灵巧动作 (BLaDA: Bridging Language to Functional Dexterous Actions within 3DGS Fields)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/deployment/blada_bridging_language_to_functional_dexterous_actions_with_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">6825e622a7d7a72a25b8bcf36860419051af2fd1</guid>
    <pubDate>Fri, 17 Apr 2026 00:00:00 GMT</pubDate>

    <category>deployment</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：deployment · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>迭代组合式数据生成用于机器人控制 (Iterative Compositional Data Generation for Robot Control)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/deployment/iterative_compositional_data_generation_for_robot_control_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">fd23697dbfcef99dc80307d79c22450c88a01990</guid>
    <pubDate>Fri, 17 Apr 2026 00:00:00 GMT</pubDate>

    <category>deployment</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：deployment · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>StaMo：从紧凑状态表示中涌现通用机器人运动 (StaMo: Unsupervised Learning of Generalizable Robot Motion from Compact State Representation)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/frontier/stamo_unsupervised_learning_of_generalizable_robot_motion_fr_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">21114db5cb901c1a9b99ea5ad73aaa0cb5872c5b</guid>
    <pubDate>Fri, 17 Apr 2026 00:00:00 GMT</pubDate>

    <category>frontier</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：frontier · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>3D 优先：从 VGA 和 Spark 2.0 看具身智能的下一个表征革命</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/perception/3d_first_principle_vga_spark_embodied_representation_revolution.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">b4f19ab15735d5f2057ca6fcb619ddf644f2f35e</guid>
    <pubDate>Fri, 17 Apr 2026 00:00:00 GMT</pubDate>

    <category>perception</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：perception · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>VGA：机器人操作是视觉到几何的映射，不是视觉到语言到动作</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/perception/vga_vision_geometry_action_over_language_video_2026.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">6c2bc776a3c7684a53c22b296362cede5524948a</guid>
    <pubDate>Fri, 17 Apr 2026 00:00:00 GMT</pubDate>

    <category>perception</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：perception · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>π0.7：可操控的通用机器人基础模型，涌现出组合泛化能力</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/vla-core/pi0_7_steerable_compositional_generalization_2026.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">bf26a12fdeaa70ae1273fd55a2ba34e69df9ff90</guid>
    <pubDate>Fri, 17 Apr 2026 00:00:00 GMT</pubDate>

    <category>vla-core</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：vla-core · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>StarVLA-α：简化视觉-语言-动作系统的强基线 (StarVLA-α: Reducing Complexity in Vision-Language-Action Systems)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/foundation/starvla_alpha_reducing_complexity_in_vision_language_action_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">53c2e81c398aa0ffac8e8804c45bb967b064c45e</guid>
    <pubDate>Thu, 16 Apr 2026 00:00:00 GMT</pubDate>

    <category>foundation</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：foundation · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>StaMo：从紧凑状态表示中涌现通用机器人运动 (StaMo: Unsupervised Learning of Generalizable Robot Motion from Compact State Representation)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/frontier/stamo_unsupervised_learning_of_generalizable_robot_motion.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">d3af44f3165f4b0ddc7926adf03314c968615140</guid>
    <pubDate>Thu, 16 Apr 2026 00:00:00 GMT</pubDate>

    <category>frontier</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：frontier · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>Spark 2.0：李飞飞 World Labs 开源的 3DGS 网页渲染引擎——1 亿点云手机秒开</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/perception/spark_2_0_3dgs_web_renderer_world_labs_2026.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">b255f0517045a47a3ac24bd343b87519530b9282</guid>
    <pubDate>Thu, 16 Apr 2026 00:00:00 GMT</pubDate>

    <category>perception</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：perception · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>用自由语言指令操控人形机器人：统一运动词汇的大型语言动作模型 (Commanding Humanoid by Free-form Language: A Large Language Action Model with Unified Motion Vocabulary)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/frontier/commanding_humanoid_by_free_form_language_a_large_language_a_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">77fbc064b374c531b0b8fc611a7106ad71d0c142</guid>
    <pubDate>Wed, 15 Apr 2026 00:00:00 GMT</pubDate>

    <category>frontier</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：frontier · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>Déjà Vu：具身智能的经验反馈学习框架 (Dejavu: Towards Experience Feedback Learning for Embodied Intelligence)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/frontier/dejavu_towards_experience_feedback_learning_for_embodied_int_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">67081e50a1c34602bb61c38751518dd182bea127</guid>
    <pubDate>Wed, 15 Apr 2026 00:00:00 GMT</pubDate>

    <category>frontier</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：frontier · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>你有一张金票：用单个噪声向量提升生成式机器人策略 (You&apos;ve Got a Golden Ticket: Improving Generative Robot Policies With A Single Noise Vector)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/frontier/youve_got_a_golden_ticket_improving_generative_robot_policie_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">513e7c3da6211dc02311a313c450b9450c901fd0</guid>
    <pubDate>Wed, 15 Apr 2026 00:00:00 GMT</pubDate>

    <category>frontier</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：frontier · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>2D 还是 3D：谁主导 VLA 模型中的显著性？—— 三阶段 Token 剪枝框架与模态显著性感知 (2D or 3D: Who Governs Salience in VLA Models? -- Tri-Stage Token Pruning Framework with Modality Salience Awareness)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/vla-core/2d_or_3d_who_governs_salience_in_vla_models_tri_stage_token_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">90f6eaced1263a18cec9788f2fc4be7ce4f748a4</guid>
    <pubDate>Wed, 15 Apr 2026 00:00:00 GMT</pubDate>

    <category>vla-core</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：vla-core · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>SnapFlow：流匹配 VLA 的单步动作生成 (SnapFlow: One-Step Action Generation for Flow-Matching VLAs via Progressive Self-Distillation)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/diffusion-flow/snapflow_one_step_action_generation_for_flow_matching_vlas_v_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">a66a8938194587e2233709dc5040cc436efddd09</guid>
    <pubDate>Tue, 14 Apr 2026 00:00:00 GMT</pubDate>

    <category>diffusion-flow</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：diffusion-flow · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>HY-Embodied-0.5：具身基础模型实战解析 (HY-Embodied-0.5: Embodied Foundation Models for Real-World Agents)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/foundation/hy_embodied_05_embodied_foundation_models_for_real_world_age_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">708937e7767f146e59f5146ce2dd1ca2b74ed8cb</guid>
    <pubDate>Tue, 14 Apr 2026 00:00:00 GMT</pubDate>

    <category>foundation</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：foundation · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>RoSHI: 野外便携式全身动捕套装 (RoSHI: A Versatile Robot-oriented Suit for Human Data In-the-Wild)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/frontier/roshi_a_versatile_robot_oriented_suit_for_human_data_in_the_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">a1988054fd356cb63e5776f21de41f7239a2cfc6</guid>
    <pubDate>Tue, 14 Apr 2026 00:00:00 GMT</pubDate>

    <category>frontier</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：frontier · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>Orion-Lite：将 LLM 推理能力蒸馏至高效纯视觉驾驶模型 (Orion-Lite: Distilling LLM Reasoning into Efficient Vision-Only Driving Models)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/planning/orion_lite_distilling_llm_reasoning_into_efficient_vision_on_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">9ffa082453a9e3503c33c1c000233627f2644543</guid>
    <pubDate>Tue, 14 Apr 2026 00:00:00 GMT</pubDate>

    <category>planning</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：planning · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>可证明概率安全：具身 AI 系统的大规模部署新范式 (Towards Provable Probabilistic Safety for Scalable Embodied AI Systems)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/planning/towards_provable_probabilistic_safety_for_scalable_embodied_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">26f6ac4201f8bc12774138ca6d4eae6187583840</guid>
    <pubDate>Tue, 14 Apr 2026 00:00:00 GMT</pubDate>

    <category>planning</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：planning · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>通过测试时强化学习实现即时 VLA 自适应 (On-the-Fly VLA Adaptation via Test-Time Reinforcement Learning)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/rl/on_the_fly_vla_adaptation_via_test_time_reinforcement_learni_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">ea8aef591f30f1c1a83560a623b4834295b9f065</guid>
    <pubDate>Tue, 14 Apr 2026 00:00:00 GMT</pubDate>

    <category>rl</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：rl · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>Genie Sim PanoRecon：从单张全景图快速生成沉浸式 3D 场景 (Genie Sim PanoRecon: Fast Immersive Scene Generation from Single-View Panorama)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/vla-core/genie_sim_panorecon_fast_immersive_scene_generation_from_sin_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">f209ddb013b5598e5481a818e9e328ebbf10b7e7</guid>
    <pubDate>Tue, 14 Apr 2026 00:00:00 GMT</pubDate>

    <category>vla-core</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：vla-core · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>GeoPredict：利用预测运动学与 3D 高斯几何实现精确 VLA 操作 (GeoPredict: Leveraging Predictive Kinematics and 3D Gaussian Geometry for Precise VLA Manipulation)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/vla-core/geopredict_leveraging_predictive_kinematics_and_3d_gaussian_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">f5fa3c919e39ba35dcdf00f8c8139195ee91c4e3</guid>
    <pubDate>Tue, 14 Apr 2026 00:00:00 GMT</pubDate>

    <category>vla-core</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：vla-core · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>HiF-VLA：通过运动表示实现后见、洞察与前瞻的视觉-语言-动作模型 (HiF-VLA: Hindsight, Insight and Foresight through Motion Representation for Vision-Language-Action Models)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/vla-core/hif_vla_hindsight_insight_and_foresight_through_motion_repre_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">2332246b1c269280e588e37d295e8ba7b77bf98c</guid>
    <pubDate>Tue, 14 Apr 2026 00:00:00 GMT</pubDate>

    <category>vla-core</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：vla-core · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>基于反思的任务适应：自改进 VLA 框架 (Reflection-Based Task Adaptation for Self-Improving VLA)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/vla-core/reflection_based_task_adaptation_for_self_improving_vla_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">fdd0dcf5078a825b294dc499472337ced7aa04b7</guid>
    <pubDate>Tue, 14 Apr 2026 00:00:00 GMT</pubDate>

    <category>vla-core</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：vla-core · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>UniLACT：深度感知 RGB 潜在动作学习 (UniLACT: Depth-Aware RGB Latent Action Learning for VLA Models)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/vla-core/unilact_depth_aware_rgb_latent_action_learning_for_vision_la_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">ba34e4c3a38bef616c335985aae7a519bbcc485a</guid>
    <pubDate>Tue, 14 Apr 2026 00:00:00 GMT</pubDate>

    <category>vla-core</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：vla-core · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>DailyArt: 从单张静态图像发现关节结构 (DailyArt: Discovering Articulation from Single Static Images via Latent Dynamics)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/world-model/dailyart_discovering_articulation_from_single_static_images_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">550a23274229af4a973746799ae7a19b692fc87e</guid>
    <pubDate>Tue, 14 Apr 2026 00:00:00 GMT</pubDate>

    <category>world-model</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：world-model · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>TAMEn: 触觉感知操作引擎用于接触丰富任务中的闭环数据收集 (TAMEn: Tactile-Aware Manipulation Engine for Closed-Loop Data Collection in Contact-Rich Tasks)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/tactile/tamen_tactile_aware_manipulation_engine_for_closed_loop_data_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">83184f8889d138e4df57d538a63b102a210716d2</guid>
    <pubDate>Fri, 10 Apr 2026 00:00:00 GMT</pubDate>

    <category>tactile</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：tactile · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>🔧 部署与硬件 — 实战落地主线总纲</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/deployment/deployment_mainline.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">0928eec68e1d9fec8b739b2665c4b1c4a9447794</guid>
    <pubDate>Thu, 09 Apr 2026 00:00:00 GMT</pubDate>

    <category>deployment</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：deployment · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>DexGrasp-Zero：形态对齐的零样本跨本体灵巧抓取策略 (DexGrasp-Zero: A Morphology-Aligned Policy for Zero-Shot Cross-Embodiment Dexterous Grasping)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/deployment/dexgrasp_zero_a_morphology_aligned_policy_for_zero_shot_cros_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">f1b1bc9486be3733b83e00c6417dc58b20554392</guid>
    <pubDate>Thu, 09 Apr 2026 00:00:00 GMT</pubDate>

    <category>deployment</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：deployment · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>中金人机系列05（灵巧手）→ VLA/控制/硬件的“可计算约束”框架（理论侧整理）</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/deployment/dexterous_hand_industry_cicc_05.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">ca5b559a62f84b7685645639d19cbba825512eed</guid>
    <pubDate>Thu, 09 Apr 2026 00:00:00 GMT</pubDate>

    <category>deployment</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：deployment · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>灵巧手机械学深度解析 (Dexterous Hand Mechanics) — 修订整合版 v2</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/deployment/dexterous_hand_mechanics.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">b5ecf97b12f4b2cc645c6b9baf7a1950d82657d1</guid>
    <pubDate>Thu, 09 Apr 2026 00:00:00 GMT</pubDate>

    <category>deployment</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：deployment · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>机器人开可乐/发牌有多难？灵巧手：硬件路线 × 接触数学 × 数据金字塔（访谈摘录整理）</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/deployment/dexterous_hands_open_can_cards_data_pyramid.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">132b9dabc1f96c738cca8d76a03113902a673a03</guid>
    <pubDate>Thu, 09 Apr 2026 00:00:00 GMT</pubDate>

    <category>deployment</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：deployment · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>EquiBim：双臂操作中的对称等变策略学习 (EquiBim: Learning Symmetry-Equivariant Policy for Bimanual Manipulation)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/deployment/equibim_learning_symmetry_equivariant_policy_for_bimanual_ma_dissection.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">2511476b10005927c1ba726ecfce3a3eff7e4057</guid>
    <pubDate>Thu, 09 Apr 2026 00:00:00 GMT</pubDate>

    <category>deployment</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：deployment · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>GR-Dexter（ByteDance Seed）：把 VLA 扩展到高自由度灵巧手的“硬件-数据-模型”全栈框架</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/deployment/gr_dexter_bimanual_dexterous_vla.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">1963b8e98d20e280469224cda37031775ee424e7</guid>
    <pubDate>Thu, 09 Apr 2026 00:00:00 GMT</pubDate>

    <category>deployment</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：deployment · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
  <item>
    <title>抓取算法与仿真平台 (Grasp Algorithms &amp; Simulation Platforms)</title>
    <link>https://github.com/sou350121/VLA-Handbook/blob/main/theory/deployment/grasp_algorithms.md?utm_source=rss&amp;utm_medium=feed&amp;utm_campaign=vla-theory</link>
    <guid isPermaLink="false">3e87d6dd75388c16929f0ccf0d23c53bbb8f349c</guid>
    <pubDate>Thu, 09 Apr 2026 00:00:00 GMT</pubDate>

    <category>deployment</category>
    <description><![CDATA[新 VLA 深度解读文章 · 归类：deployment · 点击链接到 GitHub 阅读全文。]]></description>
  </item>
</channel>
</rss>