Document Processing Pipeline
A complete v0.5.1 example demonstrating the Cognitive Hive architecture with specialists, shared SLM, and HiveMnemonic for document processing.
// Types
// Core data model shared by the document-processing pipeline below.

// A raw input document with free-form string metadata.
struct Document {
id: String,
content: String,
metadata: Map<String, String>
}
// Aggregate result produced once every specialist has processed a document.
struct ProcessedDoc {
id: String,
summary: String,
entities: List<Entity>,
sentiment: Sentiment
}
// One named entity extracted from document text.
struct Entity {
text: String,
entity_type: String, // category label, e.g. person/org/location — TODO confirm label set
confidence: f64 // model confidence, presumably in [0, 1] — verify against extractor
}
// Three-way document sentiment classification.
enum Sentiment { Positive, Neutral, Negative }
// Specialist: Document Summarization
// Produces a short summary of a document via the hive's shared SLM.
specialist Summarizer {
domain: "document summarization",
anima: {
purpose: "Create concise document summaries",
beliefs: { revision_threshold: 30 } // belief-revision threshold — semantics assumed; confirm
},
// Summarize one document; the task is first logged to this specialist's Anima.
receive Summarize(doc: Document) -> String {
self.anima.remember("Processing doc: {doc.id}")
// infer() runs the prompt on the shared hive SLM; its result is the reply
infer("Summarize in 2-3 sentences:\n\n{doc.content}")
}
}
// Specialist: Entity Extraction
// Pulls named entities out of a document and shares each one hive-wide.
specialist Extractor {
domain: "entity extraction",
anima: {
purpose: "Extract named entities from documents",
beliefs: { revision_threshold: 30 }
},
// Extract entities from a document; returns the typed list.
receive Extract(doc: Document) -> List<Entity> {
// infer_typed parses the SLM output directly into List<Entity>
let entities = infer_typed<List<Entity>>(
"Extract all named entities:\n\n{doc.content}"
)
// Share findings with hive so sibling specialists can recall them later
for entity in entities {
hive.mnemonic.learn("Found: {entity.text} ({entity.entity_type})")
}
entities
}
}
// Specialist: Sentiment Analysis
// Classifies a document's overall tone into one of the Sentiment variants.
specialist SentimentAnalyzer {
domain: "sentiment analysis",
anima: {
purpose: "Analyze document sentiment",
beliefs: { revision_threshold: 30 }
},
// Typed inference maps the SLM's answer straight onto the Sentiment enum.
receive Analyze(doc: Document) -> Sentiment {
infer_typed<Sentiment>(
"Classify sentiment as Positive, Neutral, or Negative:\n\n{doc.content}"
)
}
}
// Document Processing Hive - v0.5.1 Architecture
// Supervises the three specialists above; they share one SLM and one mnemonic.
hive DocumentPipeline {
specialists: [Summarizer, Extractor, SentimentAnalyzer],
// ONE shared SLM for all specialists
slm: "simplex-cognitive-7b",
// Shared consciousness stores processed knowledge
mnemonic: {
episodic: { capacity: 2000 }, // event/interaction memories
semantic: { capacity: 10000 }, // facts and extracted knowledge
beliefs: { revision_threshold: 50 }
},
// Routes incoming messages to specialists by embedding similarity
router: SemanticRouter(
embedding_model: "simplex-mnemonic-embed"
),
strategy: OneForOne, // restart only the failed specialist
max_restarts: 5,
// Process a document through all specialists
receive Process(doc: Document) -> ProcessedDoc {
// Parallel processing by all specialists; await gathers all three results
let (summary, entities, sentiment) = await parallel(
ask(Summarizer, Summarize(doc)),
ask(Extractor, Extract(doc)),
ask(SentimentAnalyzer, Analyze(doc))
)
// Store result in shared mnemonic for later Search queries
hive.mnemonic.learn("Processed {doc.id}: {summary}")
ProcessedDoc { id: doc.id, summary, entities, sentiment }
}
// Search processed documents via mnemonic
receive Search(query: String) -> List<String> {
hive.mnemonic.recall_for(query, max_results: 10)
}
}
// Main entry point
// End-to-end demo: spawn the hive, process one document, then query
// the shared mnemonic for related memories.
fn main() {
let pipeline = spawn DocumentPipeline
// Process a document
let doc = Document {
id: "doc-001",
content: "Simplex is a new programming language...",
metadata: {}
}
// ask() sends the message and waits for the typed reply
let result = ask(pipeline, Process(doc))
print("Summary: {result.summary}")
print("Entities: {result.entities.len()}")
print("Sentiment: {result.sentiment}")
// Search via shared mnemonic
let results = ask(pipeline, Search("programming languages"))
print("Found {results.len()} related memories")
}
AI Chatbot with Anima Memory
A conversational AI specialist that uses the Anima cognitive system for persistent memory and learning across conversations.
// Cognitive Chatbot Specialist
// Uses its personal Anima for persistent memory and learning across chats.
specialist Mellon { // "Friend" in Sindarin
domain: "conversational assistance",
// Anima provides cognitive memory
anima: {
purpose: "Help users with friendly, contextual responses",
personality: "Helpful, patient, and remembers user preferences",
beliefs: { revision_threshold: 30 },
memory: {
episodic: { capacity: 500 }, // Remember conversations
semantic: { capacity: 1000 }, // Learn facts about user
working: { capacity: 10 } // Current context
},
persistence: { auto_save: true } // presumably memory survives restarts — confirm
},
// Handle one user message: recall relevant context, generate a reply,
// and learn from the interaction when confidence is high.
receive Chat(message: String) -> String {
// Remember this interaction
self.anima.remember("User said: {message}", importance: 0.6)
// Push to working memory for immediate context
self.anima.working.push("Current message: {message}")
// Recall relevant memories for this query
let context = self.anima.recall_for(message, max_results: 5)
// Think about the response (uses Hive SLM with Anima context)
let response = self.anima.think(
"How should I respond to: {message}",
context: context
)
// Learn from successful interactions (high-confidence responses only)
if response.confidence > 0.8 {
self.anima.learn(
"Good response pattern for this type of query",
confidence: response.confidence
)
}
// Remember the response
self.anima.remember("I responded: {response.content}", importance: 0.5)
response.content
}
// Store an explicit user preference as a belief.
receive LearnPreference(preference: String) {
// Form a belief about user preference
self.anima.believe(preference, confidence: 0.8)
}
// Prune low-importance memories; the returned i64 is presumably the
// number pruned — NOTE(review): confirm what consolidate() counts.
receive Consolidate -> i64 {
// Prune low-importance memories, return count
self.anima.consolidate()
}
}
// Hive with conversational specialist
// Minimal single-specialist hive hosting Mellon.
hive AssistantHive {
specialists: [Mellon],
slm: "simplex-cognitive-7b", // one shared SLM
mnemonic: {
episodic: { capacity: 2000 },
semantic: { capacity: 5000 },
beliefs: { revision_threshold: 50 }
},
strategy: OneForOne // restart only the failed specialist
}
Knowledge Retrieval with HiveMnemonic
A knowledge-base system using the HiveMnemonic for shared memory across specialists. In v0.5.1, retrieval is built into the cognitive architecture.
// Specialist: Knowledge Ingestion
// Chunks raw content and writes each chunk into the shared HiveMnemonic.
specialist Isto { // "Knowledge" in Sindarin
domain: "knowledge ingestion and learning",
anima: {
purpose: "Ingest and organize knowledge for the hive",
beliefs: { revision_threshold: 30 }
},
// Ingest one document: chunk it, learn every chunk hive-wide, record the
// event in personal memory, and form a belief about the source.
receive Ingest(content: String, source: String) {
// Split into chunks of 500 — units unspecified; presumably characters
let chunks = split_into_chunks(content, 500)
// Learn each chunk into the HiveMnemonic (shared memory)
for chunk in chunks {
hive.mnemonic.learn(
"[{source}] {chunk}",
confidence: 0.9
)
}
// Remember this ingestion event
self.anima.remember(
"Ingested {chunks.len()} chunks from {source}",
importance: 0.7
)
// Form belief about the source
hive.mnemonic.believe(
"Source '{source}' contains relevant knowledge",
confidence: 0.85
)
}
}
// Specialist: Knowledge Retrieval & Response
// Answers questions by grounding the SLM in recalled hive knowledge plus
// this specialist's own past experience.
specialist Thind { // "Grey/Wise" in Sindarin
domain: "knowledge retrieval and synthesis",
anima: {
purpose: "Answer questions using hive knowledge",
beliefs: { revision_threshold: 30 }
},
// Answer a question using shared and personal memory as context.
receive Ask(question: String) -> String {
// Recall relevant knowledge from HiveMnemonic
let knowledge = hive.mnemonic.recall_for(question, max_results: 5)
// Recall personal experience with similar questions
let experience = self.anima.recall_for(question, max_results: 3)
// FIX: the recalled knowledge/experience were previously never used —
// the prompt said "based on your knowledge" but contained neither
// variable, so every retrieval above was dead. Interpolate them so
// the SLM actually grounds its answer in what was recalled.
let answer = infer("
Answer the question using the knowledge below.
Knowledge: {knowledge}
Past experience: {experience}
Question: {question}
Provide a clear, accurate answer.")
// Remember this Q&A for future reference
self.anima.remember(
"Answered: {question}",
importance: 0.6
)
answer
}
}
// Knowledge Hive with shared consciousness
// Pairs the ingestion specialist (Isto) with the retrieval specialist (Thind).
hive KnowledgeHive {
specialists: [Isto, Thind],
// Shared SLM for all specialists
slm: "simplex-cognitive-7b",
// HiveMnemonic stores all ingested knowledge
mnemonic: {
episodic: { capacity: 5000 }, // Events and interactions
semantic: { capacity: 50000 }, // Facts and knowledge chunks
beliefs: { revision_threshold: 50 }
},
// Routes messages to the best-matching specialist by embedding similarity
router: SemanticRouter(
embedding_model: "simplex-mnemonic-embed"
),
strategy: OneForOne // restart only the failed specialist
}
// Usage
// Demo: spawn the knowledge hive, ingest a document, then ask a question.
fn main() {
let hive = spawn KnowledgeHive
// FIX: doc_content was used without ever being defined; provide sample
// content so the example is self-contained and runnable.
let doc_content = "To configure the system, edit settings.toml and restart the service."
// Ingest documents into the shared HiveMnemonic
ask(hive.Isto, Ingest(doc_content, "manual.pdf"))
// Ask questions - mnemonic provides context automatically
let answer = ask(hive.Thind, Ask("How do I configure the system?"))
print(answer)
}
Cognitive Hive Example
A complete CHAI v0.5.1 hive for document analysis. All specialists share one SLM and contribute to the HiveMnemonic shared consciousness.
// Specialist: Summarization
// Summarizes text and publishes the result to the hive's shared memory.
specialist Isto { // "Knowledge" in Sindarin
domain: "text summarization",
// Personal Anima configuration
anima: {
purpose: "Create concise, accurate summaries",
beliefs: { revision_threshold: 30 }
},
// Summarize one text; the task itself is logged to personal memory.
receive Summarize(text: String) -> String {
// Remember this task in personal Anima
self.anima.remember("Summarized document of {text.len()} chars")
// infer() uses shared Hive SLM with Anima + Mnemonic context
let summary = infer("Summarize concisely:\n\n{text}")
// Share the summary with the hive
hive.mnemonic.learn("Document summary: {summary}")
summary
}
}
// Specialist: Entity Extraction
// Extracts structured entities from free text and shares each discovery.
specialist Curu { // "Skill/Craft" in Sindarin
domain: "named entity recognition",
anima: {
purpose: "Extract structured data from text",
beliefs: { revision_threshold: 30 }
},
// Run unstructured inference, then parse the raw output into entities.
receive Extract(text: String) -> List<Entity> {
let raw = infer("Extract named entities (people, orgs, locations):\n\n{text}")
let entities = parse_entities(raw)
// Share discovered entities with the hive
// NOTE(review): fields {entity.name}/{entity.type} differ from the Entity
// struct defined earlier (text/entity_type) — presumably this example uses
// a different Entity type; verify.
for entity in entities {
hive.mnemonic.learn("Found entity: {entity.name} ({entity.type})")
}
entities
}
}
// Specialist: Sentiment Analysis
// Classifies emotional tone; returns a typed SentimentResult.
specialist Silma { // "Crystal/Clarity" in Sindarin
domain: "sentiment analysis",
anima: {
purpose: "Understand emotional tone and sentiment",
beliefs: { revision_threshold: 30 }
},
// Typed inference parses the SLM output directly into SentimentResult.
receive Analyze(text: String) -> SentimentResult {
infer_typed<SentimentResult>(
"Analyze sentiment (positive/negative/neutral) with confidence:\n\n{text}"
)
}
}
// Specialist: Topic Classification
// Assigns one or more topic labels to a piece of text.
specialist Penna { // "Teller/Writer" in Sindarin
domain: "topic classification",
anima: {
purpose: "Categorize content by topic",
beliefs: { revision_threshold: 30 }
},
// Typed inference parses the SLM output directly into List<Topic>.
receive Classify(text: String) -> List<Topic> {
infer_typed<List<Topic>>(
"Classify into relevant topics:\n\n{text}"
)
}
}
// The Hive - v0.5.1 Architecture
// Coordinates all four analysis specialists over one shared SLM and mnemonic.
hive DocumentAnalyzer {
specialists: [Isto, Curu, Silma, Penna],
// ONE shared SLM for all specialists (v0.5.1)
slm: "simplex-cognitive-7b",
// Shared consciousness - replaces vector store
mnemonic: {
episodic: { capacity: 1000, importance_threshold: 0.4 }, // presumably drops events below 0.4 importance — confirm
semantic: { capacity: 5000 },
beliefs: { revision_threshold: 50 }, // 50% for hive beliefs
},
// Routes messages to specialists by embedding similarity
router: SemanticRouter(
embedding_model: "simplex-mnemonic-embed"
),
strategy: OneForOne, // restart only the failed specialist
max_restarts: 3,
// Analyze a document using all specialists
receive AnalyzeDocument(doc: String) -> DocumentAnalysis {
// Fan out to all four specialists in parallel and await every result
let (summary, entities, sentiment, topics) = await parallel(
ask(Isto, Summarize(doc)),
ask(Curu, Extract(doc)),
ask(Silma, Analyze(doc)),
ask(Penna, Classify(doc))
)
// Store analysis result in shared mnemonic
hive.mnemonic.learn("Analyzed document with {entities.len()} entities")
DocumentAnalysis { summary, entities, sentiment, topics }
}
}
Simple CRM
A basic CRM system demonstrating data management with AI-enhanced features.
// A CRM contact and its full interaction history.
struct Contact {
id: String,
name: String,
email: String,
company: Option<String>, // unknown until learned
notes: List<Note>,
score: f64 // AI-generated lead score
}
// One timestamped interaction note with AI-classified sentiment.
struct Note {
timestamp: DateTime,
content: String,
sentiment: Sentiment
}
// Actor owning all CRM contacts; state is checkpointed after each mutation.
actor ContactManager {
// In-memory contact store keyed by generated contact id.
var contacts: Map<String, Contact> = {}
// Create a new contact with a neutral starting lead score.
receive AddContact(name: String, email: String) -> Contact {
let contact = Contact {
id: generate_id(),
name,
email,
company: None,
notes: [],
score: 0.5 // neutral starting score
}
contacts.insert(contact.id, contact)
checkpoint() // persist actor state
contact
}
// Attach an AI-analyzed note to a contact and refresh its lead score.
receive AddNote(contact_id: String, content: String) -> Result<Note, Error> {
let contact = contacts.get_mut(contact_id)?
// AI-analyze the note's sentiment
let sentiment = await ai::classify<Sentiment>(content)
let note = Note {
timestamp: DateTime::now(),
content,
sentiment
}
contact.notes.push(note)
// Update lead score based on interaction history
contact.score = calculate_lead_score(contact)
checkpoint()
Ok(note)
}
// Draft a professional follow-up message from the contact's note history.
// FIX: was declared `-> String` yet the body uses `?` on the contact
// lookup, which can fail; return Result for consistency with AddNote.
receive GetSuggestedResponse(contact_id: String) -> Result<String, Error> {
let contact = contacts.get(contact_id)?
let history = contact.notes
.map(n => n.content)
.join("\n")
Ok(await ai::complete("
Based on this interaction history with {contact.name}:
{history}
Suggest a professional follow-up message:"))
}
// Return the highest-scoring contacts, best first.
receive GetTopLeads(limit: i64) -> List<Contact> {
contacts
.values()
.sort_by(c => c.score, descending)
.take(limit)
}
}
// Lead score in [0, 1]: 40% engagement volume (capped at 10 notes),
// 60% share of positive-sentiment notes. Empty histories score 0 on
// the positivity term thanks to the max(1.0) divisor guard.
fn calculate_lead_score(contact: &Contact) -> f64 {
    let total = contact.notes.len() as f64
    let positives = contact.notes
        .filter(n => n.sentiment == Sentiment::Positive)
        .len() as f64
    // Engagement saturates at 10 interactions
    let engagement = total.min(10.0) / 10.0
    // Guard against division by zero for contacts with no notes
    let positivity = positives / total.max(1.0)
    engagement * 0.4 + positivity * 0.6
}