Getting Started

Customize a Motion to fit a robot's action space.

from pydantic import Field

from mbodied.types.motion.control import HandControl, FullJointControl
from mbodied.types.motion import AbsoluteMotionField, RelativeMotionField

class FineGrainedHandControl(HandControl):
    """A HandControl extended with a spoken comment and per-finger joints."""

    comment: str = Field(None, description="A comment to voice aloud.")
    index: FullJointControl = AbsoluteMotionField([0, 0, 0], bounds=[-3.14, 3.14], shape=(3,))
    thumb: FullJointControl = RelativeMotionField([0, 0, 0], bounds=[-3.14, 3.14], shape=(3,))
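
Because Motions are pydantic models, the customized class documents its own action space and can be serialized like any other model. The snippet below is a hedged usage sketch, not part of the library's examples: the joints= keyword on FullJointControl and the assumption that the inherited pose and grasp fields have defaults should be checked against your installed version.

# Inspect the generated schema describing the customized action space.
print(FineGrainedHandControl.model_json_schema())

# Hypothetical values; FullJointControl(joints=...) is an assumed constructor.
fine = FineGrainedHandControl(
    comment="Pinching the object.",
    index=FullJointControl(joints=[0.0, 0.5, 0.5]),
    thumb=FullJointControl(joints=[0.0, -0.2, 0.1]),
)
print(fine.model_dump_json(indent=2))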

Run a robotics transformer model on a robot.

import os
from mbodied.agents import LanguageAgent
from mbodied.agents.motion import OpenVlaAgent
from mbodied.agents.sense.audio import AudioAgent
from mbodied.robots import SimRobot

cognition = LanguageAgent(
  context="You are an embodied planner that responds with a python list of strings and nothing else.",
  api_key=os.getenv("OPENAI_API_KEY"),
  model_src="openai",
  recorder="auto",
)
audio = AudioAgent(use_pyaudio=False, api_key=os.getenv("OPENAI_API_KEY")) # pyaudio is buggy on mac
motion = OpenVlaAgent(model_src="https://api.mbodi.ai/community-models/")

# Subclass and override do() and capture() methods.
robot = SimRobot()

instruction = audio.listen()
plan = cognition.act(instruction, robot.capture())

# The plan arrives as a python-list string, e.g. "['step one', 'step two']".
for step in plan.strip('[]').strip().split(','):
  print("\nMotor agent is executing step: ", step, "\n")
  for _ in range(10):
    hand_control = motion.act(step, robot.capture())
    robot.do(hand_control)
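
As the comment in the script notes, SimRobot is a stand-in: to drive real hardware, subclass the robot interface and override do() and capture(). The sketch below is a hedged outline rather than the library's reference implementation; MyRobot, the Robot import path, and the exact method signatures are assumptions to verify against your installed version.

from mbodied.robots import Robot  # assumed export; SimRobot can also be subclassed directly
from mbodied.types.motion.control import HandControl
from mbodied.types.sense.vision import Image

class MyRobot(Robot):
    """Hypothetical hardware adapter; replace the bodies with your driver calls."""

    def do(self, motion: HandControl) -> None:
        # Send the predicted pose/grasp to the arm and gripper controllers here.
        raise NotImplementedError

    def capture(self) -> Image:
        # Return the current camera frame as an mbodied Image.
        raise NotImplementedError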

Example Scripts:

Notebooks

Real Robot Hardware: Open In Colab

Simulation with SimplerEnv: Open In Colab

Run OpenVLA with embodied-agents in simulation: Open In Colab

Record dataset on a robot:
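
The recording example is cut off here. As a placeholder, the sketch below shows one plausible shape for it, assuming a Recorder class in mbodied.data.recording whose record() accepts an observation/action pair; the constructor arguments, the record() signature, and the instruction string are assumptions to check against the installed package.

from mbodied.agents.motion import OpenVlaAgent
from mbodied.data.recording import Recorder
from mbodied.robots import SimRobot

# Assumed constructor arguments; verify against Recorder's actual signature.
recorder = Recorder("example_dataset", out_dir="saved_datasets")

robot = SimRobot()
motion = OpenVlaAgent(model_src="https://api.mbodi.ai/community-models/")

observation = robot.capture()                             # camera frame (mbodied Image)
action = motion.act("pick up the fork", observation)      # predicted HandControl
recorder.record(observation=observation, action=action)   # append one step to the dataset
robot.do(action)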