
byLLM Tutorials and Examples

Tutorials

  • RPG Game Level Generation


    A tutorial on building an AI-integrated RPG game using byLLM.

    Start

  • Fantasy Trading Game


    A text-based trading game where non-player characters are driven by large language models. It uses tool calling for game mechanics such as bargaining at shops.

    Start

  • AI-Powered Multimodal MCP Chatbot


    This tutorial shows how to implement an agentic AI application using the byLLM package and object-spatial programming. MCP integration is also demonstrated.

    Start

Examples

This section collects the example byLLM programs bundled in jac-byllm/examples/. Examples are grouped by type, and each example's source is shown in a tab so you can quickly inspect the code.

Core Examples

Small, focused examples that show common byLLM patterns for integrating LLMs in Jac programs.

Repository location: jac-byllm/examples/core_examples
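Before looking at the larger programs, the following minimal sketch (illustrative, not one of the bundled files; the model name and function are assumptions) shows the core pattern they all share: a typed Jac function with no body is delegated to an LLM via by llm(), and byLLM builds the prompt from the signature and types.

import from byllm { Model }

glob llm = Model(model_name="gpt-4o-mini");

# No body: byLLM derives the prompt from the name, parameters and return type.
def summarize(text: str) -> str by llm();

with entry {
    print(summarize("byLLM turns typed Jac functions into LLM calls."));
}

With jac and byllm installed, any bundled example can be run the same way, e.g. jac run <example>.jac.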

Core examples (code)
import from byllm { Model }

glob llm = Model(verbose=True, model_name="gpt-4o-mini");

enum Personality {
    INTROVERT,
    EXTROVERT,
    AMBIVERT
}

obj Person {
    has full_name: str,
        yod: int,
        personality: Personality;
}
sem Person.yod = "Year of Death of the person";

glob personality_examples: dict[str, Personality] = {
    'Albert Einstein': Personality.INTROVERT,
    'Barack Obama': Personality.EXTROVERT
};

def get_person_info(name: str) -> Person by llm(
    reason=True,
    temperature=0.0,
    incl_info={"personality_examples": personality_examples}
);

with entry {
    person_obj = get_person_info('Martin Luther King Jr.');
    print(
        f"{person_obj.full_name} was a {person_obj.personality} person who died in {person_obj.yod}"
    );
}
import from byllm { Model }

glob llm = Model(model_name="gpt-4o");

obj Position {
    has x: int, y: int;
}

obj Wall {
    has start_pos: Position, end_pos: Position;
}

obj Map {
    has level: Level, walls: list[Wall], small_obstacles: list[Position];
    has enemies: list[Position];
    has player_pos: Position;
}

obj Level {
    has name: str, difficulty: int;
    has width: int, height: int, num_wall: int, num_enemies: int;
    has time_countdown: int, n_retries_allowed: int;
}

obj LevelManager {
    has current_level: int = 0, current_difficulty: int = 1,
        prev_levels: list[Level] = [], prev_level_maps: list[Map] = [];

    def create_next_level(last_levels: list[Level], difficulty: int, level_width: int, level_height: int)
    -> Level by llm();

    def create_next_map(level: Level) -> Map by llm();

    def get_next_level -> tuple(Level, Map) {
        self.current_level += 1;
        # Keeping Only the Last 3 Levels
        if len(self.prev_levels) > 3 {
            self.prev_levels.pop(0);
            self.prev_level_maps.pop(0);
        }
        # Generating the New Level
        new_level = self.create_next_level(
            self.prev_levels,
            self.current_difficulty,
            20, 20
        );

        self.prev_levels.append(new_level);
        # Generating the Map of the New Level
        new_level_map = self.create_next_map(new_level);
        self.prev_level_maps.append(new_level_map);
        # Increase the difficulty at the end of every 2 levels
        if self.current_level % 2 == 0 {
            self.current_difficulty += 1;
        }
        return (new_level, new_level_map);
    }
}

'''Get the map of the level'''
def get_map(map: Map) -> list[str] {
    map_tiles = [['.' for _ in range(map.level.width)] for _ in range(map.level.height)];

    for wall in map.walls {
        for x in range(wall.start_pos.x, wall.end_pos.x + 1) {
            for y in range(wall.start_pos.y, wall.end_pos.y + 1) {
                map_tiles[y-1][x-1] = 'B';
            }
        }
    }

    for obs in map.small_obstacles {
        map_tiles[obs.y-1][obs.x-1] = 'B';
    }

    for enemy in map.enemies {
        map_tiles[enemy.y-1][enemy.x-1] = 'E';
    }
    map_tiles[map.player_pos.y-1][map.player_pos.x-1] = 'P';
    map_tiles = [['B'] + row + ['B'] for row in map_tiles];
    map_tiles = [['B' for _ in range(map.level.width + 2)]] + map_tiles + [['B' for _ in range(map.level.width + 2)]];
    return [''.join(row) for row in map_tiles];
}

with entry {
    level_manager = LevelManager();
    for i in range(2) {
        (new_level, new_level_map) = level_manager.get_next_level();
        print(new_level);
        print('\n'.join(get_map(new_level_map)));
    }
}

Vision / Multimodal examples

Examples that demonstrate multimodal usage (images and video) with byLLM and vision-capable LLMs. Accompanying media files live alongside the Jac code in the repo.

Repository location: jac-byllm/examples/vision

Vision / Multimodal examples (code)

Vision-enabled examples that combine image/video inputs with byLLM workflows. Only the Jac code files are shown below; accompanying media are in the examples folder (e.g. person.png, receipt.jpg, mugen.mp4).

import from byllm { Model, Image }

glob llm = Model(verbose=True, model_name="gpt-4o");

def solve_math_question(question_img: Image) -> str by llm();

with entry {
    print(
        solve_math_question(Image.open('math_question.jpg'))
    );
}
import from byllm { Model, Image }

glob llm = Model(model_name="gpt-4o");

enum Personality {
    INTROVERT,
    EXTROVERT,
    AMBIVERT
}

obj Person {
    has full_name: str,
        yod: int,
        personality: Personality;
}

def get_person_info(img_of_person: Image) -> Person by llm();

with entry {
    person_obj = get_person_info(Image("person.png"));
    print(
        f"{person_obj.full_name} was a {person_obj.personality} person who died in {person_obj.yod}"
    );
}
import from byllm { Model, Image }

glob llm = Model(model_name="gpt-4o");

obj PurchasedItem {
    has name: str,
        price: float,
        quantity: int;
}

obj Receipt {
    has store: str,
        date: str,
        items: list[PurchasedItem],
        total: float;

    def pp() -> None {
        print(f"Store: {self.store}");
        print(f"Date: {self.date}");
        for item in self.items {
            print(f"{item.name} - {item.price} x {item.quantity}");
        }
        print(f"Total: {self.total}");
    }
}

def get_receipt(receipt_img: Image) -> Receipt by llm();
def verify_total(receipt: Receipt) -> bool by llm();

with entry {
    receipt_img = Image.open("receipt.jpg");
    receipt = get_receipt(receipt_img);
    receipt.pp();
    print(f"Total is correct: {verify_total(receipt)}");
}
import from byllm { Model, Video }

glob llm = Model(model_name="gpt-4o");

"""
Mugen is a moving character
"""
def is_aligned(video: Video, text: str) -> bool by llm();

with entry {
    video = Video("mugen.mp4", 1);
    text = "Mugen jumps off and collects few coins.";
    print(is_aligned(video, text));
}

Tool-calling examples

Examples showing how to orchestrate external tools (APIs, search, or internal tool servers) from Jac/byLLM and how to coordinate multi-agent workflows.

Repository location: jac-byllm/examples/tool_calling

Tool-calling examples (code)

Examples that demonstrate calling external tools, tool orchestration, or multi-agent interactions.
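The following minimal sketch (illustrative, not a bundled file; the model name and the weather stub are assumptions) shows the basic shape: ordinary typed Jac functions are passed via tools=[...], and the model may call them while producing its answer.

import from byllm { Model }

glob llm = Model(model_name="gpt-4o-mini");

"""Return a canned weather report for a city (a stub for illustration)."""
def get_weather(city: str) -> str {
    return f"The weather in {city} is sunny and 24 C.";
}

def answer(question: str) -> str by llm(tools=[get_weather]);

with entry {
    print(answer("Do I need an umbrella in Paris today?"));
}

The bundled examples below apply the same mechanism to Wikipedia lookups, web search, and multi-agent coordination.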

import from byllm { Model }
import wikipedia;

glob llm = Model(verbose=True, model_name="gpt-4o-mini");

def get_wikipedia_summary(title: str) -> str {
    try {
        return wikipedia.summary(title);
    } except Exception {
        options = wikipedia.search(title, results=5, suggestion=True);
        raise Exception(f"Could not get summary for {title}. Similar titles: {options}");
    }
}
sem get_wikipedia_summary = """Get the summary of the related article from Wikipedia.""";


def get_answer(question: str) -> str by llm(tools=[get_wikipedia_summary]);

with entry {
    question = "Who is Jason Mars?";
    answer = get_answer(question);
    print(answer);
}
import from byllm { Model }

# Note that these tools are not available in the byllm package and
# should be defined by the user.
import from byllm.tools.wikipedia_utils { wikipedia_summary }
import from byllm.tools.serper { search, scrape }
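# For illustration (the module paths above are placeholders), a user-defined
# tool is just a typed Jac function whose docstring or sem describes it, e.g.:
#   """Search the web and return the raw result text."""
#   def search(query: str) -> str {
#       # call your preferred search API here and return its text
#   }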

glob llm = Model(model_name="gpt-4o-mini");

def save_output_as_md(output: str, filename: str) -> None {
    with open(filename, 'w') as f {
        f.write(output);
    }
}

def persona_expert(requirements: str) -> str
by llm(
    method="ReAct",
    tools=[wikipedia_summary, search, scrape],
    max_prev_react_outputs=10,
    max_react_iterations=10,
    context=["DONOT SUMMARIZE. MAKE IT DETAILED"]
);
def demographic_expert(requirements: str) -> str
by llm(
    method="ReAct",
    tools=[wikipedia_summary, search, scrape],
    max_prev_react_outputs=10,
    max_react_iterations=10,
    context=["DONOT SUMMARIZE. MAKE IT DETAILED"]
);
def market_expert(requirements: str) -> str
by llm(
    method="ReAct",
    tools=[wikipedia_summary, scrape, search],
    max_prev_react_outputs=10,
    max_react_iterations=10,
    context=["DONOT SUMMARIZE. MAKE IT DETAILED"]
);
def demand_expert(requirements: str) -> str
by llm(
    method="ReAct",
    tools=[wikipedia_summary, search, scrape],
    max_prev_react_outputs=10,
    max_react_iterations=10,
    context=["DONOT SUMMARIZE. MAKE IT DETAILED"]
);
def manager(query: str) -> str
by llm(
    method="ReAct",
    tools=[persona_expert, demographic_expert, market_expert, search, scrape],
    max_react_iterations=10,
    max_prev_react_outputs=10,
    context=["DONOT SUMMARIZE. MAKE IT DETAILED"]
);

with entry {
    query = "Jaclang is a native superset of python with a focus on AI and ML. Jaclang allows developers to"
        "prototype AI Applications with ease by providing high level abstractions for AIML Usecases."
        "Perform a detailed analysis of the market for Jaclang and provide a detailed report on the market,"
        "demand, demographics and personas for the product.";
    save_output_as_md(manager(query), "marketing_report.md");
}
"""
Fantasy Trading Game - An interactive RPG trading simulation using byLLM
Demonstrates byLLM character generation, conversation, and transaction systems
"""

import from byllm { Model }
import from os { get_terminal_size }


glob llm = Model(model_name="gpt-4o");


glob person_record: dict = {};

obj InventoryItem {
    has name: str;
    has price: float;
}


obj Person {
    has name: str;
    has age: int;
    has hobby: str;
    has description: str;
    has money: float;
    has inventory: list[InventoryItem];
}


"""
A Chat object contains the person's name and the message.
"""
obj Chat {
    has person: str;
    has message: str;
}

"""
Makes a transaction between buyer and seller for the specified item.
Returns true if successful, false otherwise. The price is optional,
if not provided, the item's price is used (if they negotiate to a different price that should be given here
otherwiwse the price is optional and None will be used as a default parameter.
"""
def make_transaction(buyer_name: str, seller_name: str, item_name: str, price: int| None = None) -> bool {
    buyer = person_record[buyer_name];
    seller = person_record[seller_name];

    # Find the item in seller's inventory
    item_to_buy = None;
    item_index = -1;

    for i in range(len(seller.inventory)) {
        if seller.inventory[i].name.lower() == item_name.lower() {
            item_to_buy = seller.inventory[i];
            item_index = i;
            break;
        }
    }

    # Check the item exists before using its price
    if not item_to_buy {
        return False;
    }

    price = price or item_to_buy.price;

    # Check the buyer has enough money
    if buyer.money < price {
        return False;
    }

    # Transfer item and money
    buyer.money -= price;
    seller.money += price;
    buyer.inventory.append(item_to_buy);
    seller.inventory.pop(item_index);

    return True;
}


"""
Generates the player character for a fantasy RPG game.
"""
def make_player() -> Person
    by llm();


"""
Generates a random NPC with a name, age, hobby and description.
The NPC should be a fantasy character, like an elf, dwarf, orc, etc.
"""
def make_random_npc() -> Person
    by llm();


"""
Generates the next line of dialogue from the given NPC in an ongoing
conversation with the player. If no chat history is provided, generates
the NPC's initial greeting. The NPC's response should reflect their
personality, background, and any prior context from the chat history.

Before making a transaction, the NPC confirms with the player, and the
transaction is made only after the player agrees. Make sure the NPC doesn't
sell an item for far less than its price, though they can negotiate a bit.
"""
def chat_with_player(player: Person, npc: Person, chat_history: list[Chat]) -> Chat
    by llm(tools=[make_transaction]);


with entry {


    def clear_screen() {
        # ANSI escape sequence to clear screen and move cursor to top-left
        print("\033[2J\033[H", end="");
    }

    def print_inventory_table() {
        # Get terminal dimensions
        terminal_size = get_terminal_size();
        terminal_width = terminal_size.columns;
        terminal_height = terminal_size.lines;

        # Use full terminal width
        separator = "=" * terminal_width;
        print(separator);
        print("INVENTORY STATUS".center(terminal_width));
        print(separator);

        # Header
        player_header = "  PLAYER: " + player.name;
        npc_header = "NPC: " + npc.name;
        player_money = "  Money: $" + str(player.money);
        npc_money = "Money: $" + str(npc.money);

        # Calculate column widths based on terminal width
        half_width = (terminal_width - 2) // 2;
        print(f"{player_header.ljust(half_width)}{npc_header.ljust(half_width)}");
        print(f"{player_money.ljust(half_width)}{npc_money.ljust(half_width)}");
        print("-" * terminal_width);

        # Get max inventory length for proper formatting
        player_len = len(player.inventory);
        npc_len = len(npc.inventory);
        max_items = player_len if player_len > npc_len else npc_len;

        for i in range(max_items) {
            player_item = "";
            npc_item = "";

            if i < len(player.inventory) {
                item = player.inventory[i];
                player_item = "  " + item.name + " - $" + str(item.price);
            }

            if i < len(npc.inventory) {
                item = npc.inventory[i];
                npc_item = item.name + " - $" + str(item.price);
            }

            # Use dynamic column widths
            print(f"{player_item.ljust(half_width)}{npc_item.ljust(half_width)}");
        }

        print("=" * terminal_width);
        print("");  # Add spacing after inventory

        return 7 + max_items;  # Return number of lines used for inventory
    }

    def display_chat_history() {

        # Get terminal dimensions
        terminal_size = get_terminal_size();
        terminal_width = terminal_size.columns;
        terminal_height = terminal_size.lines;


        print("CONVERSATION".center(terminal_width));
        print("-" * terminal_width);

        # Calculate available space for chat messages
        inventory_lines = 7 + max(len(player.inventory), len(npc.inventory));
        available_lines = terminal_height - inventory_lines - 4;  # Reserve lines for input prompt and headers

        # Calculate how many messages we can show based on available space
        # Each message takes approximately 5-6 lines (speaker + bubble)
        lines_per_message = 6;
        max_messages = max(1, available_lines // lines_per_message);

        # Always show only the most recent messages that fit
        recent_messages = chat_display_history[-max_messages:] if len(chat_display_history) > max_messages else chat_display_history;

        for chat_msg in recent_messages {
            if chat_msg["type"] == "npc" {
                print_speech_bubble_inline(chat_msg["speaker"], chat_msg["message"], True);
            } else {
                print_speech_bubble_inline(chat_msg["speaker"], chat_msg["message"], False);
            }
        }
    }

    def print_speech_bubble_inline(speaker: str, message: str, is_npc: bool = True) {

        # Get terminal dimensions
        terminal_size = get_terminal_size();
        terminal_width = terminal_size.columns;
        terminal_height = terminal_size.lines;


        # Create speech bubble effect
        lines = [];
        words = message.split();
        current_line = "";
        # Use terminal width to determine max width for speech bubbles
        max_width = min(60, terminal_width - 20);  # Leave some margin

        for word in words {
            test_line = current_line + " " + word if current_line else word;
            if len(test_line) <= max_width {
                current_line = test_line;
            } else {
                if current_line {
                    lines.append(current_line);
                }
                current_line = word;
            }
        }
        if current_line {
            lines.append(current_line);
        }

        # Calculate bubble width based on terminal width
        bubble_width = min(64, terminal_width - 16);  # Adaptive bubble width

        if is_npc {
            # NPC speech bubble (left side)
            print(speaker + ":");
            print("." + "-" * (bubble_width - 2) + ".");
            for line in lines {
                padding_needed = bubble_width - 4 - len(line);
                padding = " " * padding_needed if padding_needed > 0 else "";
                print("| " + line + padding + " |");
            }
            print("'" + "-" * (bubble_width - 2) + "'");
        } else {
            # Player input bubble (right side, heavily indented)
            indent_size = max(20, terminal_width - bubble_width - 4);  # Push to right side
            indent = " " * indent_size;
            print(indent + speaker + ":");
            print(indent + "." + "-" * (bubble_width - 2) + ".");
            for line in lines {
                padding_needed = bubble_width - 4 - len(line);
                padding = " " * padding_needed if padding_needed > 0 else "";
                print(indent + "| " + line + padding + " |");
            }
            print(indent + "'" + "-" * (bubble_width - 2) + "'");
        }
        print("");  # Add spacing between messages
    }

    def render_ui() {

        # Get terminal dimensions
        terminal_size = get_terminal_size();
        terminal_width = terminal_size.columns;
        terminal_height = terminal_size.lines;


        clear_screen();
        inventory_lines = print_inventory_table();
        display_chat_history();

        # Fill remaining space to push input to bottom
        used_lines = inventory_lines + 2;  # +2 for conversation header
        chat_messages_count = len(chat_display_history);
        if chat_messages_count > 0 {
            # Calculate space used by recent messages
            available_lines = terminal_height - inventory_lines - 4;
            lines_per_message = 6;
            max_messages = max(1, available_lines // lines_per_message);
            displayed_messages = min(chat_messages_count, max_messages);
            used_lines += displayed_messages * lines_per_message;
        }

        # Pad with blank lines so the input prompt sits at the bottom
        for i in range(max(0, terminal_height - used_lines - 2)) {
            print("");
        }
    }

    # ---- Game setup and main loop ----

    # Example hardcoded characters (commented out for AI generation)
    # player = Person(name="Arin", age=24, hobby="swordsmanship", description="A brave and agile warrior skilled with the blade, ready to face any challenge.", money=150.0, inventory=[InventoryItem(name="Iron Sword", description="A sturdy iron sword, balanced and reliable.", price=100.0), InventoryItem(name="Leather Armor", description="Lightweight armor offering decent protection.", price=75.0), InventoryItem(name="Healing Potion", description="Restores health when consumed.", price=25.0)]);
    # npc = Person(name="Thalor", age=137, hobby="herbalism", description="An ancient elf who loves tending to mystical plants and caring for his pet raven.", money=80.0, inventory=[InventoryItem(name="Herb Pouch", description="A collection of rare herbs for potions.", price=40.0), InventoryItem(name="Raven Feather", description="A magical feather from his pet raven.", price=30.0)]);

    # Generate AI-powered characters
    player = make_player();
    npc = make_random_npc();

    person_record[player.name] = player;
    person_record[npc.name] = npc;

    history = [];
    chat_display_history = [];  # Store chat messages for display

    # Initial screen render
    render_ui();

    while True {
        # Generate NPC response and add to history
        chat = chat_with_player(player, npc, history);
        history.append(chat);

        # Add NPC message to display history
        chat_display_history.append({
            "type": "npc",
            "speaker": npc.name,
            "message": chat.message
        });

        # Re-render UI with new message
        render_ui();

        # Get player input
        inp = input("\nPlayer: ");
        if inp {
            # Add player message to display history
            chat_display_history.append({
                "type": "player",
                "speaker": player.name,
                "message": inp
            });

            history.append(Chat(person=player.name, message=inp));

            # Re-render UI with player response
            render_ui();
        }

    }

}
import from byllm { Model }
import wikipedia;

glob llm = Model(model_name="gpt-4o-mini");

def get_wikipedia_summary(title: str) -> str {
    try {
        return wikipedia.summary(title);
    } except Exception {
        options = wikipedia.search(title, results=5, suggestion=True);
        raise Exception(f"Could not get summary for {title}. Similar titles: {options}");
    }
}
sem get_wikipedia_summary = """Get the summary of the related article from Wikipedia.""";

def ask_opponent(statement: str) -> str {
    user_input = input(f"AI -> {statement} ");
    return f"Opponents Answer -> {user_input}";
}

def state_facts(information: str) -> None {
    print(f"AI -> {information}");
}

def debate_agent(topic: str) -> str by llm(
    tools=[get_wikipedia_summary, ask_opponent, state_facts],
    context=[
        "You have to defend the given topic while the opponent is defending the counter topic",
        "If you dont know about the topic or you want to verify the opponents claims use the given tools",
        "You can ask opponent counter questions",
        "You are a humorous, cunning, very arrogant debater.",
    ]
);

with entry {
    debate_agent('Merlin the Wizard is still alive.');
}

Agentic AI examples

Small agentic patterns and lightweight multi-step reasoning examples (multi-turn planning, simple agents). These live under the agentic_ai examples folder.

Repository location: jac-byllm/examples/agentic_ai

Agentic AI examples (code)

Examples that demonstrate small agentic behaviors and lightweight multi-step reasoning.
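As a minimal sketch of the shape these programs take (illustrative; the model name, node, and walker are assumptions): a walker visits a node and fills it in using an LLM-delegated function.

import from byllm { Model }

glob llm = Model(model_name="gpt-4o-mini");

"""Suggest one concrete next step toward the given goal."""
def next_step(goal: str) -> str by llm();

node Goal {
    has goal: str;
    has plan: str = "";
}

walker Planner {
    can plan with Goal entry {
        here.plan = next_step(here.goal);
        print(f"Next step for '{here.goal}': {here.plan}");
    }
}

with entry {
    goal_node = root ++> Goal(goal="Ship a small byLLM demo");
    goal_node[0] spawn Planner();
}

The bundled examples below combine the same walker/node mechanics with multimodal input and multi-step task planning.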

import from byllm { Model, Image }
import from typing { List, Dict, Any }
import os;

def clear_terminal() -> None {
    # For Windows
    if os.name == "nt" {
        os.system("cls");
    # For macOS / Linux
    } else {
        os.system("clear");
    }
}

glob llm = Model(model_name="gpt-4.1");

obj Response {
    has follow_up_questions: str;
    has summary: str;
    has when: str;
    has who: List[str];
    has what: str;
    has where: List[str];
    has terminate_conversation: bool;
    has show_summary: bool;
}

sem Response = "Memory details refined using the user's input and prior context.";
sem Response.follow_up_questions = "Ask one follow-up question to continue the conversation. If all required details are present, ask to terminate the conversation.";
sem Response.summary = "A concise summary of the memory.";
sem Response.when = "The date of the memory in YYYY-MM-DD format.";
sem Response.who = "Exact names of people in the memory (e.g., [John, Kate]); return [] if none.";
sem Response.where = "List of places relevant to the memory.";
sem Response.what = "What the memory is about.";
sem Response.show_summary = "True if all required details are present and the summary should be shown.";
sem Response.terminate_conversation = "True if the user asked to terminate the conversation; otherwise false.";

""" Update and extract memory details based on user input and context."""
def update_memory_details(
    image: Image,
    utterance: str = "",
    summary: str = "",
    when: str = "",
    who: List[str] = [],
    where: List[str] = [],
    what: str = "",
    conversation: List[dict] = [],
    show_summary: bool = False,
    terminate_conversation: bool = False
) -> Response by llm();

node session {
    has summary: str = "";
    has when: str = "";
    has who: list = [];
    has where: list = [];
    has what: str = "";
    has conversation : list = [];
    has image_url: str = "";
    has show_summary: bool = False;
    has terminate_conversation: bool = False;
}

walker update_session {
    has image_url: str;
    has utterance: str = "";

    can visit_session with `root entry {
        visit [-->](`?session) else {
            session_node = here ++> session();
            visit session_node[0];
        }
    }

    can update_session with session entry {
        if here.image_url == "" {
            here.image_url = self.image_url;
        }
        response = update_memory_details(
            image = Image(url=self.image_url),
            utterance = self.utterance,
            summary = here.summary,
            when = here.when,
            who = here.who,
            where = here.where,
            what = here.what,
            conversation = here.conversation,
            show_summary = here.show_summary,
            terminate_conversation = here.terminate_conversation
        );
        here.summary = response.summary;
        here.when = response.when;
        here.who = response.who;
        here.where = response.where;
        here.what = response.what;
        here.show_summary = response.show_summary;
        self.show_summary = response.show_summary;
        here.terminate_conversation = response.terminate_conversation;
        self.terminate_conversation = response.terminate_conversation;
        here.conversation = here.conversation + [{"role": "user", "content": self.utterance}] + [{"role": "assistant", "content": response.follow_up_questions}];

        if response.show_summary {
            if response.terminate_conversation {
                print("🤖 Assistant: " + response.follow_up_questions);
                print("✅ Final Memory Summary:" + response.summary);
            } else {
                print("📝 Memory Summary (in progress):" + response.summary);
                print("🤖 Assistant: " + response.follow_up_questions);
            }
        } else {
            print("🤖 Assistant: " + response.follow_up_questions);
        }
    }
}

with entry {
    clear_terminal();
    print("🚀 Starting Friendzone Lite...");
    image_url = input("🌐 Please enter an image URL: ");
    if image_url == "" {
        print("❌ No image URL provided. Exiting.");
        exit(1);
    }
    walker_obj = root spawn update_session(image_url=image_url);
    while not (walker_obj.show_summary and walker_obj.terminate_conversation) {
        utterance = input("👤 You: ");
        walker_obj = root spawn update_session(utterance=utterance, image_url=image_url);
    }
}
import from byllm { Model }
import from pathlib { Path }

glob llm = Model(model_name="gpt-4o-mini");

# Simple task object with semantic annotations
obj Task {
    has name: str;
    has type: str;
    has details: str;
    has priority: int = 1;
}
sem Task = "A specific development task with clear implementation requirements";
sem Task.name = "Clear, descriptive name for the task";
sem Task.type = "Task category: code, fix, docs, or test";
sem Task.details = "Specific implementation instructions";
sem Task.priority = "Task priority: 1=high, 2=medium, 3=low";

obj CodeResult {
    has task_name: str = "";
    has code: str = "";
    has status: str = "";
    has feedback: str = "";
}
sem CodeResult = "Result of code generation task";
sem CodeResult.task_name = "Name of the completed task";
sem CodeResult.code = "Generated code solution";
sem CodeResult.status = "Task completion status: success or failed";
sem CodeResult.feedback = "Validation feedback and suggestions";

# Core AI functions
def create_plan(request: str) -> list[Task] by llm(method="Reason");
def generate_solution(task: Task) -> str by llm(method="Reason");
def validate_code(code: str, task: Task) -> str by llm(method="Reason");

# Nodes for walker traversal
node TaskNode {
    has task: Task = Task(name="", type="", details="", priority=1);
    has code: str = "";
    has feedback: str = "";
    has status: str = "pending";
    has result: CodeResult = CodeResult();

    def process_task() {
        print(f"⚡ Working on: {self.task.name}");
        self.code = generate_solution(self.task);
        self.feedback = validate_code(self.code, self.task);
        self.status = "success" if self.feedback else "failed";
        print(f"✅ Completed: {self.task.name}");

        # Create result
        self.result.task_name = self.task.name;
        self.result.code = self.code;
        self.result.status = self.status;
        self.result.feedback = self.feedback;
    }
}

node SummaryNode {
    has results: list[CodeResult] = [];

    def show_summary() {
        output = f"🎯 Summary ({len(self.results)} tasks):\n\n";
        for result in self.results {
            status_icon = "✅" if result.status == "success" else "❌";
            output += f"{status_icon} {result.task_name}\n";
            if result.code and len(result.code) > 0 {
                code_preview = result.code[:300];
                if len(result.code) > 300 {
                    code_preview += "...";
                }
                output += f"   Code: {code_preview}\n";
            }
            if result.feedback {
                output += f"   Feedback: {result.feedback[:100]}...\n";
            }
            output += "\n";
        }
        print(output);
    }
}

# GeniusAgent as a walker
walker GeniusAgent {
    has request: str;
    has tasks: list[Task] = [];
    has results: list[CodeResult] = [];
    has current_task_index: int = 0;

    can start with `root entry {
        print("🚀 Genius Lite - AI Coding Assistant");
        print("Simple, structured code generation with validation");
        print("=" * 50);
        self.tasks = create_plan(self.request);
        print(f"📋 Created {len(self.tasks)} tasks");

        if len(self.tasks) > 0 {
            # Create task nodes and connect them
            task_nodes = [];
            for task in self.tasks {
                task_node = TaskNode();
                task_node.task = task;
                task_nodes.append(task_node);
            }

            # Connect nodes in sequence
            for i in range(len(task_nodes) - 1) {
                task_nodes[i] ++> task_nodes[i + 1];
            }

            # Connect last task node to summary
            summary_node = SummaryNode();
            summary_node.results = self.results;
            task_nodes[-1] ++> summary_node;

            # Start traversal from first task
            visit task_nodes[0];
        } else {
            print("No tasks created, ending execution");
        }
    }

    can process_task with TaskNode entry {
        # Let the node handle its own processing
        here.process_task();

        # Collect result from node and add to walker's results
        self.results.append(here.result);

        # Continue to next node
        visit [-->];
    }

    can show_summary with SummaryNode entry {
        # Pass results to the summary node
        here.results = self.results;
        # Let the node handle its own summary display
        here.show_summary();
    }
}

# Main execution
with entry {
    user_request = "Create a Python calculator with basic math operations";
    print(f"📝 Demo: {user_request}");
    print("-" * 50);
    agent = GeniusAgent(request=user_request) spawn root;
    print("\n🎉 Genius Lite Demo Complete!");
    print("💡 Features: Task planning, code generation, validation");
}

Microbenchmarks

Short, single-purpose microbenchmarks for probing model behavior and performance on targeted tasks. These were used for the evaluations in the MTP paper.

Repository location: jac-byllm/examples/microbenchmarks

Microbenchmarks (code)

Small microbenchmarks and single-purpose prompts useful for testing model behavior and performance.
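Note the mockllm model used in the first benchmark below: it replays the queued outputs instead of calling a real model, so benchmarks run deterministically and offline. A minimal sketch of that pattern (the function and output are illustrative):

import from byllm { Model }

# mockllm replays the queued outputs instead of calling a real model.
glob llm = Model(model_name="mockllm", outputs=["Paris"]);

def capital_of(country: str) -> str by llm();

with entry {
    print(capital_of("France"));  # prints "Paris" with no API call
}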

import from byllm { Model }


obj Employer {
    has employer_name: str,
        location: str;
}

obj Person {
    has name: str,
        age: int,
        employer: Employer,
        job: str;
}

glob llm = Model(
    # model_name="gpt-4o",
    model_name="mockllm",
    print_prompt=True,
    outputs=[
        Person(name='Steve Jobs', age=56, employer=Employer(employer_name='Apple Inc.', location='California'), job='CEO')
    ]
);

def generate_person(info:str) -> Person by llm();

with entry {
    person = generate_person("Steve Jobs was 56 years old and worked as the CEO of Apple Inc. in California.");
    print(
        f"Person's name is {person.name} and works at {person.employer.employer_name} which is located in {person.employer.location}."
    );
}
import from byllm { Model }

glob llm = Model(model_name="gpt-4o");

obj OddWord {
    has options: list[str];
    has reasoning: str;
    has result: str;
}

glob examples: list[OddWord] = [
    OddWord(options=["skirt", "dress", "pen", "jacket"],
                   reasoning="skirt is clothing, dress is clothing, pen is an object, jacket is clothing.",
                   result="pen"),

    OddWord(options=["Spain", "France", "German", "England", "Singapore"],
                   reasoning="Spain, France, England, Singapore is a country, German is a language.",
                   result="German"),
];
sem examples = "Examples for Picking Odd Word out (Options, Reasoning, Result)";

def odd_word_out_and_reason(options: list[str]) -> OddWord by llm(
    incl_info={"examples" : examples}
);

with entry {
    print(
        odd_word_out_and_reason(
            ["Bentley", "Ferrari", "Lamborghini", "Casio", "Toyota"]
        )
    );
}
import from byllm { Model }

glob llm = Model(
    # model_name="gpt-4o",
    model_name="gemini/gemini-2.5-flash",
);

obj PunchlineJokes {
    has jokes: list[dict] = [
        {
            "joke": "How does a penguin build its house?",
            "punchline": "Igloos it together."
        },
        {
            "joke": "Which knight invented King Arthur's Round Table?",
            "punchline": "Sir Cumference."
        }
    ];

    def generate_joke -> dict[str, str] by llm();
    def generate {
        joke_punchline = self.generate_joke();
        self.jokes.append(joke_punchline);
    }
}

sem PunchlineJokes.jokes = 'Jokes with Punchlines';

with entry {
    joke_gen = PunchlineJokes();
    for i in range(5) {
        joke_gen.generate();
    }
    print(joke_gen.jokes);
}
import from byllm { Model }

glob llm = Model();

def correct_grammar(text: str) -> str by llm(temperature=0.9);

with entry {
    files_path = input("Enter the file path to the text file: ");
    with open(files_path, 'r') as file {
        text = file.read();
    }
    print("Original text:", text);
    corrected_text = correct_grammar(text);
    print("Corrected text:", corrected_text);
}
import from byllm { Model }

glob llm = Model(model_name="gpt-4o-mini", verbose=True);

def translate(input: str, lang: str="French") -> str by llm();

with entry {
    print(translate("I am a student", "French"));
    print(translate("I am a student", "Spanish"));
}
import from byllm { Model }

glob llm = Model(model_name="gpt-4o");

obj Essay {
    has essay: str;

    def essay_judge(criteria: str) -> str by llm();
    def generate_summary(judgements: dict) -> str by llm();
    def give_grade(summary: str) -> str by llm();
}

with entry {
    essay = "With a population of approximately 45 million Spaniards and 3.5 million immigrants,"
        "Spain is a country of contrasts where the richness of its culture blends it up with"
        "the variety of languages and dialects used. Being one of the largest economies worldwide,"
        "and the second largest country in Europe, Spain is a very appealing destination for tourists"
        "as well as for immigrants from around the globe. Almost all Spaniards are used to speaking at"
        "least two different languages, but protecting and preserving that right has not been"
        "easy for them.Spaniards have had to struggle with war, ignorance, criticism and the governments,"
        "in order to preserve and defend what identifies them, and deal with the consequences.";
    essay = Essay(essay);
    criterias = ["Clarity", "Originality", "Evidence"];
    judgements = {};
    for criteria in criterias {
        judgement = essay.essay_judge(criteria);
        judgements[criteria] = judgement;
    }
    summary = essay.generate_summary(judgements);
    grade = essay.give_grade(summary);
    print("Reviewer Notes: ", summary);
    print("Grade: ", grade);
}
import from byllm { Model }

glob llm = Model();

def get_expert(question: str) -> str by llm(method='Reason');
def get_answer(question: str, expert: str) -> str by llm();

with entry {
    question = "What are Large Language Models?";
    expert = get_expert(question);
    answer = get_answer(question, expert);
    print(f"{expert} says: '{answer}' ");
}