The Agent-first Open Standard for AI workflows

Open-source language. Shareable pipelines.
Deterministic results. Built for agents.

domain = "invoice_extraction"   [concept] Invoice = "Invoice information extracted from text" InvoiceDetails = "The category of the invoice"   [pipe] [pipe.process_invoice] PipeSequence = "Process relevant information from an invoice" inputs = { ocr_input = "PDF" } output = "Invoice" steps = [ { pipe = "extract_text_from_image", result = "invoice_pages" }, { pipe = "extract_invoice", batch_over = "invoice_pages", batch_as = "invoice_page", result = "invoice" }, ]   [pipe.extract_text_from_image] PipeOcr = "Extract page contents from an image" inputs = { ocr_input = "PDF" } output = "Page" page_views = true   [pipe.extract_invoice] PipeSequence = "Extract invoice information from an invoice text transcript" inputs = { invoice_page = "Page" } output = "Invoice" steps = [ { pipe = "analyze_invoice", result = "invoice_details" }, { pipe = "extract_invoice_data", result = "invoice" }, ]   [pipe.analyze_invoice] PipeLLM = "Analyze the invoice" inputs = { "invoice_page.page_view" = "Image", invoice_page = "Page" } output = "InvoiceDetails" prompt_template = """ Analyze this invoice:   @invoice_page.text_and_images.text.text """   [pipe.extract_invoice_data] PipeLLM = "Extract invoice information from an invoice text transcript" inputs = { "invoice_page.page_view" = "Image", invoice_details = "InvoiceDetails", invoice_page = "Page" } output = "Invoice" llm = "llm_to_extract_invoice" prompt_template = """ Extract invoice information from this invoice:   The category of this invoice is: $invoice_details.category.   @invoice_page.text_and_images.text.text """
domain = "swe_diff" definition = "Pipelines for analyzing differences between two versions of a codebase."   [concept] GitDiff = "A git diff output showing changes between two versions of a codebase" StructuredChangelog = "A structured changelog with sections for each type of change." MarkdownChangelog = "A text report in markdown format that summarizes the changes made to the codebase between two versions."   [pipe] [pipe.write_changelog] PipeSequence = "Write a comprehensive changelog for a software project" inputs = { git_diff = "GitDiff" } output = "MarkdownChangelog" steps = [ { pipe = "write_changelog_from_git_diff", result = "structured_changelog" }, { pipe = "format_changelog_as_markdown", result = "markdown_changelog" }, ]   [pipe.write_changelog_from_git_diff] PipeLLM = "Write a changelog for a software project." inputs = { git_diff = "GitDiff" } output = "StructuredChangelog" llm = "llm_for_swe" system_prompt = """ You are an expert technical writer and software architect. Your task is to carefully review the code diff and write a structured changelog. """ prompt_template = """ Analyze the following code diff. Write a structured changelog that summarizes the changes made to the codebase between two versions. Be sure to include changes to code but also complementary pipelines, scripts, docs.   @git_diff """   [pipe.format_changelog_as_markdown] PipeJinja2 = "Format the final changelog in markdown with proper structure" inputs = { structured_changelog = "StructuredChangelog" } output = "MarkdownChangelog" template_category = "markdown" jinja2 = """ ## Unreleased   {% if structured_changelog.added %} ### Added {% for item in structured_changelog.added %} - {{ item }} {% endfor %} {% endif %}   {% if structured_changelog.changed %} ### Changed {% for item in structured_changelog.changed %} - {{ item }} {% endfor %} {% endif %}   {% if structured_changelog.fixed %} ### Fixed {% for item in structured_changelog.fixed %} - {{ item }} {% endfor %} {% endif %}   {% if structured_changelog.removed %} ### Removed {% for item in structured_changelog.removed %} - {{ item }} {% endfor %} {% endif %}   {% if structured_changelog.deprecated %} ### Deprecated {% for item in structured_changelog.deprecated %} - {{ item }} {% endfor %} {% endif %}   {% if structured_changelog.security %} ### Security {% for item in structured_changelog.security %} - {{ item }} {% endfor %} {% endif %} """
domain = "answer" definition = "The domain for questions and answers"   [concept] Answer = "An answer to a question" Question = "A question to a problem" EnrichedQuestion = "An enriched question"   [pipe] [pipe.retrieve_then_answer] PipeSequence = "Answer a question, given the target type and the excerpts neeeded to answer it" inputs = { question = "answer.Question", text = "Text", client_instructions = "Text" } output = "Dynamic" steps = [ { pipe = "write_context_of_text", result = "context" }, { pipe = "retrieve_excerpts", result = "excerpts" }, { pipe = "enrich_question", result = "enriched_question" }, { pipe = "answer_question", result = "answer" }, ]   [pipe.answer_question] PipeSequence = "Answer the question in a dynamically specified format" inputs = { enriched_question = "EnrichedQuestion", client_instructions = "Text", context = "Text", excerpts = "retrieve.RetrievedExcerpt" } output = "Dynamic" steps = [ { pipe = "pre_answer_question", result = "answer" }, { pipe = "cleanse_answer", result = "cleaned_answer" }, ]   [pipe.write_context_of_text] PipeLLM = "Write the context of a sample of text" inputs = { text = "Text" } output = "Text" prompt_template = """ Your task is to write the context of a text. This context should be maximum of 30 words. The goal is to quickly understand the type of ducument by just reding this context.   @text """   [pipe.enrich_question] PipeLLM = "Get an enriched question" inputs = { question = "answer.Question", client_instructions = "Text", context = "Text" } output = "EnrichedQuestion" llm = "llm_to_enrich" prompt_template = """ Your task is to reformulate a form field or a question into a question for a LLM. This question will need an answer from a text.   @context   Here is the question/field name: '$question'. Each word is important therefore do not extrapolate or create information.   {% if client_instructions %} Here are important instructions from the customer to take into account in order to enrich the question. The client instructions are important and you absolutely must follow them. However, it DOES NOT contain the answer. @client_instructions {% endif %}   Here is the main task: If I were to prompt an LLM to extract this information from a specific section of the contract, what should I ask?   Here are some rules that you absolutely must follow: - No need to add instructions like "based on the provided contract", just write the question in English, no need for code. - No need for intros like "Here is a reformulated question", just write the question. - It is important that you specify that the question is a Yes/No question if it is the case. """   [pipe.pre_answer_question] PipeLLM = "Answer the question in a dynamically specified format" inputs = { enriched_question = "EnrichedQuestion", excerpts = "retrieve.RetrievedExcerpt", context = "Text", client_instructions = "Text" } output = "Dynamic" llm = "llm_to_answer" structuring_method = "preliminary_text" prompt_template = """ Your task is to answer a question based on excerpts previously retrieved from a text. To help you, your assistant has already enriched the question and extracted the most relevant excerpts{% if client_instructions %}, and provided you with some hints or guidelines from the customer{% endif %}.   @context   @enriched_question   @excerpts Not all of of the exceprts are necessarily relevant to the question, but all of them are relevant to the contract.   
{% if client_instructions %} Here are important instructions from the customer to take into account in order to enrich the question. The client instructions are important and you absolutely must follow them. However, it DOES NOT contain the answer. @client_instructions {% endif %}   Important rules for answering: - For Yes/No questions: Answer "NO" if no excerpts or inconclusive evidence (with explanation) are provided. - For multiple choice questions: Mark as "indeterminate" if no excerpts or inconclusive evidence (with explanation) are provided. - Always cite the answer with citations EXCEPT when the answer is "indeterminate" - When evidence is clear: Provide answer with citations - When no answer is applicable, or the answer says that its not applicable, mark as "not_applicable" with explanation. - If the target_format is FreeText, it must be a text. - [IMPORTANT] DO NOT add commentaries like "Based on.. According to...", just output the answer. - [IMPORTANT] DO NOT extrapolate or create information. Base your answer solely on the provided excerpts. - Please, cite the exact sentences that you used to answer the question in a "citation" paragraph. - Make sure that you also cite the clause number if provided (20.1 for instance).   Here is the fields format of the answer you must output: """   [pipe.cleanse_answer] PipeLLM = "Clean the answer" inputs = { answer = "Dynamic" } output = "Dynamic" structuring_method = "preliminary_text" prompt_template = """ You are helping to clean answers that were generated from analyzing document excerpts to answer specific questions.   @answer   Your task is to clean the answer by handling cases where no clear answer could be found in the document excerpts.   ONLY output the cleaned answer - do not add any explanation or commentary.   If the answer contains any of these patterns, output "Indeterminate": - Empty or blank answers (including empty JSON objects) - Statements indicating no relevant information was found - Phrases like: * "The excerpts are not relevant to the question" * "There is nothing relevant in the document to answer" * "Based on the document, there is nothing..." * "No information found in the document" * "Cannot determine from the provided excerpts" * "No relevant excerpts were found"   Important rules: - Keep "NO" answers unchanged - Keep "not_applicable" or "indeterminate" answers unchanged - Preserve all other valid answers exactly as they are - DO NOT add any explanation or commentary to your output """
domain = "extract_generic"   [pipe] [pipe.power_extractor] PipeSequence = "Update page content with markdown" inputs = { ocr_input = "PDF" } output = "Text" steps = [ { pipe = "ocr_page_contents_and_views_from_pdf", result = "page_contents" }, { pipe = "write_markdown_from_page_content", batch_over = "page_contents", batch_as = "page_content", result = "markdowns" }, ]   [pipe.write_markdown_from_page_content] PipeLLM = "Write markdown from page content" inputs = { "page_content.page_view" = "Image", page_content = "Page" } output = "Text" llm = "llm_for_img_to_text" system_prompt = "You are a multimodal LLM, expert at converting images into perfect markdown." prompt_template = """ You are given an image which is a view of a document page. You are also given the text extracted from the page by an OCR model. Your task is to output the perfect markdown of the page.   Here is the text extracted from the page: {{ page_content.text_and_images.text.text|tag("ocr_text") }}   - Ensure you do not miss any information from the page. The text extracted from the page is not always complete. Your task is to complete the text and add the missing information using the page view. - Output only the markdown, nothing else. No need for "```markdown" or "```". - In case of diagrams, charts, visualizations, etc. with text inside (the text may not appear in the input text - the OCR fails to extract it), make sure to include the text in the markdown. Feel free to choose the most appropriate markdown element to do so. """

Manifesto

Agents can remember facts. They can't remember methods. Every solution they discover, every process they perfect: gone the moment the task ends. Knowledge without know-how is half a mind.

Join us in building the open standard that gives agents memory of methods. Portable workflows they can write, improve, and share. Help us enable agents to build lasting automation for every business, not just one-off answers.

The infrastructure for scaling repeatable AI work starts here.

Agent-First + Open Standard changes everything

Without Pipelex

❌  Human work to create each new workflow
❌  AI usage is technical, mirroring the underlying APIs
❌  Workflows trapped in LangGraph/n8n/custom code
❌  Every team rebuilds the same workflows from scratch

With Pipelex

✅  Agent creates workflow from natural language
✅  AI use at a high level of abstraction, reflecting the use case
✅  Open standard to run anywhere, no vendor lock-in
✅  Shared community pipelines are your building blocks


Cursor and Pipelex

Explore how Cursor can create a whole Pipelex pipeline on its own.


Build Once, Run Anywhere

Our plugin system lets you use any AI model
Pipelex runs anywhere thanks to open source + API + MCP


They support us

Google for Startups

FAQs

Find answers to your most pressing questions about knowledge pipelines and AI workflows.

Why invent a new language?

AI workflows represent a new computing paradigm that demands a new approach. While natural language is great for human communication, it's too ambiguous for reliable automation. To collaborate with AI, we need a declarative language that captures domain expertise directly, preserving human intent while providing the required structure to orchestrate AI consistently. Think of it as the middle ground between vague English prompts and rigid code.

How does Pipelex compare to LangGraph, Mastra, Pydantic AI?

Those tools ask you to write code: they're dev tools for humans. Pipelex asks you to declare intent: it's a dev tool for agents first. These frameworks lock you in. Pipelex gives you a portable language that non-technical people can write, any expert can read, and any platform can run. We're not competing on features: we're establishing the standard.

How does Pipelex compare to n8n, Workato, make.com?

Those are visual, drag-and-drop platforms built for humans clicking through GUIs. Pipelex is text-based and built for agents to write workflows autonomously. No-code workflows live trapped in their platforms. Pipelex workflows are portable files you can version control, share, and run anywhere. They focus on app connectors. We focus on AI-powered information processing with structured outputs.

What kind of language is Pipelex?

Pipelex is a declarative, domain-specific language (DSL) for AI workflows. It uses TOML syntax that reads like documentation, not code. Unlike traditional programming languages that express technical implementation, Pipelex operates at a conceptual level: you declare business intent like "extract buyer from invoice" rather than API calls. It's designed to be readable by domain experts, writable by agents, and executable anywhere.
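As a rough illustration, modeled on the examples at the top of this page, a declaration of "extract buyer from invoice" could look something like this (the concept, pipe name, and input fields below are hypothetical):

domain = "invoice_extraction"

[concept]
Buyer = "The buyer identified on an invoice"

[pipe]
[pipe.extract_buyer]
PipeLLM = "Extract the buyer from an invoice"
inputs = { invoice_text = "Text" }
output = "Buyer"
prompt_template = """
Extract the buyer from this invoice:

@invoice_text
"""

Note how the declaration names the business concept and the intent, not the API calls needed to fulfill it.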

Are you an agent framework or an “agent runtime”?

Neither. Pipelex provides deterministic AI workflows that agents can use as tools. Our workflows generate structured outputs reliably, making them perfect for agent tool use. Agents access Pipelex workflows via MCP (Model Context Protocol) or our API. Think of us as a tool provider for agents: we give them reliable, repeatable methods for information processing tasks.

When is Pipelex the right choice?

Pipelex is the right choice when you need repeatable, deterministic AI workflows for knowledge work. If you're processing invoices, analyzing contracts, or generating reports, whether every week or thousands of times a day, and above all you need consistent results every time, that's Pipelex. It's not for creative exploration or open-ended tasks; it's for when you've figured out the method and need to run it reliably at scale.

Is Pipelex open-source?

Yes. The core Pipelex language and Python runtime are fully open-source. You can find our repos at github.com/Pipelex/pipelex and our cookbook with examples at github.com/Pipelex/pipelex-cookbook. We follow an open-core model where enterprise features will require a commercial license, but the language itself will always remain open.

Can I extend Pipelex?

Yes, at three levels. First, pipelines are composable: they can call other pipelines as building blocks, so you can build on what others have created. Second, our codebase is modular with plugin systems for your own orchestrators, AI model APIs, cost reporters, and more. Third, it's open-source: fork it, extend it for your needs, and contribute back. The community shapes the standard.
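As a sketch of that first level, a pipeline references other pipes in its steps, in the same pattern as the examples at the top of this page (process_invoice comes from the invoice example above; summarize_invoice and write_invoice_summary are imagined for illustration):

[pipe.summarize_invoice]
PipeSequence = "Summarize an invoice by composing existing pipes"
inputs = { ocr_input = "PDF" }
output = "Text"
steps = [
    { pipe = "process_invoice", result = "invoice" },
    { pipe = "write_invoice_summary", result = "summary" },
]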

Still have questions?

We're here to help!