//! # Anchor Chain
//!
//! Anchor Chain is a Rust framework designed to simplify the orchestration of workflows involving
//! Large Language Models (LLMs). Inspired by LangChain, Anchor Chain provides a set of easy-to-use
//! and extensible building blocks that enable developers to create robust and efficient LLM-based
//! applications quickly. The framework prioritizes type safety, processing efficiency, and
//! flexibility through its carefully designed APIs and abstractions.
//!
//! ## Features
//!
//! - Statically Typed Chains: Anchor Chain leverages Rust's type system to provide statically
//!   typed chains, catching potential type mismatches at compile time.
//!
//! - Async Runtime for Parallel Execution: Built on the Tokio async runtime, Anchor Chain
//!   allows for efficient parallel processing of nodes in complex chains.
//!
//! - Extensibility through the Node Trait: The Node trait allows developers to create custom
//!   nodes tailored to their specific use cases, enabling seamless integration into the chain
//!   (see the custom node sketch below).
//!
//! - Support for Popular LLMs: Anchor Chain provides built-in support for popular LLMs, such as
//!   OpenAI's GPT models and Anthropic Claude, abstracting away API details to provide a common
//!   interface.
//!
//! - Parallel Node Execution: The ParallelNode struct enables parallel execution of multiple
//!   nodes, leveraging concurrency to improve overall chain performance (see the parallel
//!   execution sketch below).
//!
//! ## Getting Started
//!
//! To get started with Anchor Chain, add the following dependency to your Cargo.toml file:
//!
//! ```toml
//! [dependencies]
//! anchor-chain = "0.1.0"
//! ```
//!
//! Then you can create chains using the `ChainBuilder` and invoke them with the `.process()`
//! method. Nodes are added to the chain with `link()` and are executed in the order they
//! are linked.
//!
//! ```rust,no_run
//! use anchor_chain::{
//!     chain::ChainBuilder,
//!     models::openai::OpenAIModel,
//! };
//!
//! #[tokio::main]
//! async fn main() {
//!     let chain = ChainBuilder::new()
//!         .link(OpenAIModel::new_gpt3_5_turbo("You are a helpful assistant").await)
//!         .build();
//!
//!     let result = chain
//!         .process("Write a hello world program in Rust")
//!         .await
//!         .expect("Error processing chain");
//!
//!     println!("Result: {}", result);
//! }
//! ```
//!
//! Prompts can be constructed using the `Prompt` struct. `Prompt` uses
//! [Tera](https://keats.github.io/tera/docs/#templates) templating to allow
//! for dynamic input substitution. Tera's syntax is based on Jinja2 and Django
//! templates. Context variables are passed to the prompt using a `HashMap`.
//!
//! ```rust,no_run
//! use std::collections::HashMap;
//!
//! use anchor_chain::{
//!     chain::ChainBuilder,
//!     models::openai::OpenAIModel,
//!     nodes::prompt::Prompt,
//! };
//!
//! #[tokio::main]
//! async fn main() {
//!     let chain = ChainBuilder::new()
//!         .link(Prompt::new("{{ input }}"))
//!         .link(OpenAIModel::new_gpt3_5_turbo("You are a helpful assistant").await)
//!         .build();
//!
//!     let result = chain
//!         .process(HashMap::from([("input", "Write a hello world program in Rust")]))
//!         .await
//!         .expect("Error processing chain");
//!
//!     println!("Result: {}", result);
//! }
//! ```
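//!
//! Custom nodes can be created by implementing the `Node` trait. The sketch below is
//! illustrative: it assumes `Node` exposes associated `Input`/`Output` types and an async
//! `process` method via `async_trait`. Consult the `node` module documentation for the
//! exact trait signature.
//!
//! ```rust,ignore
//! use anchor_chain::{node::Node, AnchorChainError};
//! use async_trait::async_trait;
//!
//! /// A node that uppercases its string input.
//! #[derive(Debug)]
//! struct Uppercase;
//!
//! #[async_trait]
//! impl Node for Uppercase {
//!     type Input = String;
//!     type Output = String;
//!
//!     async fn process(&self, input: Self::Input) -> Result<Self::Output, AnchorChainError> {
//!         Ok(input.to_uppercase())
//!     }
//! }
//! ```
//!
//! A custom node like this can then be passed to `link()` alongside the built-in nodes.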
//!
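//! Multiple nodes can be run concurrently with `ParallelNode`. The sketch below fans the
//! same input out to two models and joins their outputs; the exact `ParallelNode::new` and
//! `to_boxed_future` signatures shown here are assumptions, so consult the `parallel_node`
//! module documentation before use.
//!
//! ```rust,ignore
//! use anchor_chain::{
//!     chain::ChainBuilder,
//!     models::openai::OpenAIModel,
//!     parallel_node::{to_boxed_future, ParallelNode},
//! };
//!
//! #[tokio::main]
//! async fn main() {
//!     // Two instances of the same model with different system prompts.
//!     let concise = Box::new(OpenAIModel::new_gpt3_5_turbo("Respond concisely").await);
//!     let detailed = Box::new(OpenAIModel::new_gpt3_5_turbo("Respond in detail").await);
//!
//!     let parallel = ParallelNode::new(
//!         vec![concise, detailed],
//!         // Combine the outputs of both nodes into a single string.
//!         to_boxed_future(|outputs: Vec<String>| Ok(outputs.join("\n---\n"))),
//!     );
//!
//!     let chain = ChainBuilder::new().link(parallel).build();
//!
//!     let result = chain
//!         .process("Explain lifetimes in Rust".to_string())
//!         .await
//!         .expect("Error processing chain");
//!
//!     println!("Result: {result}");
//! }
//! ```
//!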
//! For more examples, please refer to the [examples
//! directory](https://github.com/emersonmde/anchor-chain/tree/main/examples).

#[cfg(doctest)]
#[doc = include_str!("../README.md")]
struct _README;

pub mod agents;
pub mod chain;
mod error;
mod link;
mod state_manager;
pub mod models;
pub mod node;
pub mod nodes;
pub mod parallel_node;
pub mod vector;

#[cfg(feature = "bedrock")]
pub use agents::agent_executor::AgentExecutor;
pub use agents::tool_registry::ToolRegistry;
pub use chain::ChainBuilder;
pub use error::AnchorChainError;
pub use link::Link;
pub use node::NoOpNode;
pub use node::Node;
pub use node::Stateless;
pub use nodes::logger::Logger;
pub use nodes::prompt::Prompt;
pub use parallel_node::to_boxed_future;
pub use parallel_node::ParallelNode;
pub use state_manager::StateManager;

#[cfg(feature = "bedrock")]
pub use models::bedrock_converse::BedrockConverse;
#[cfg(feature = "ollama")]
pub use models::ollama::Ollama;
#[cfg(feature = "openai")]
pub use models::openai::OpenAIChatModel;
#[cfg(feature = "openai")]
pub use models::openai::OpenAIEmbeddingModel;
#[cfg(feature = "openai")]
pub use models::openai::OpenAIInstructModel;
#[cfg(feature = "openai")]
pub use models::openai::OpenAIModel;

pub use vector::document::Document;
#[cfg(feature = "opensearch")]
pub use vector::opensearch_client_builder::OpenSearchClientBuilder;
#[cfg(feature = "opensearch")]
pub use vector::opensearch_indexer::OpenSearchIndexer;
#[cfg(feature = "opensearch")]
pub use vector::opensearch_retriever::OpenSearchRetriever;

pub use ctor;