Basics

Variables

variables.sx
// Immutable binding (default)
let name = "Simplex"
let count = 42
let pi = 3.14159

// Mutable binding
var counter = 0
counter = counter + 1

// Type annotation (optional with inference)
let explicit: String = "typed"

// Constants (compile-time)
const MAX_SIZE = 1000

Comments

comments.sx
// Single line comment

/* Multi-line
   comment */

/// Documentation comment (for sxdoc)
fn documented() { }

Types

Primitive Types

Type      | Description         | Example
i64, i32  | Signed integers     | 42, -17
f64, f32  | Floating point      | 3.14, 1.0e-5
Bool      | Boolean             | true, false
String    | UTF-8 text          | "hello"
Char      | Unicode character   | 'a', '\n'
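
A minimal sketch using the literals from the table above; the variable names are illustrative, with explicit annotations shown even where inference would suffice:

primitives.sx
// Illustrative bindings; types and literals follow the table above
let big: i64 = 42
let small: i32 = -17
let ratio: f64 = 3.14
let tiny: f32 = 1.0e-5
let ready: Bool = true
let greeting: String = "hello"
let initial: Char = 'a'
let newline: Char = '\n'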

Collection Types

collections.sx
// List - ordered, growable
let numbers: List<i64> = [1, 2, 3]

// Map - key-value pairs
let config: Map<String, String> = {
    "host": "localhost",
    "port": "8080",
}

// Set - unique elements
let tags: Set<String> = {"ai", "rust", "web"}

// Tuple - fixed size, mixed types
let pair: (String, i64) = ("age", 30)

Functions

functions.sx
// Basic function
fn greet(name: String) -> String {
    "Hello, {name}!"
}

// Multiple parameters
fn add(a: i64, b: i64) -> i64 {
    a + b
}

// No return value
fn log(message: String) {
    println(message)
}

// Generic function
fn first<T>(items: List<T>) -> Option<T> {
    items.first()
}

// Named parameters
fn connect(host: String, port: i64, timeout: i64 = 30) { }
connect("localhost", port: 8080)

// Lambda / closure
let double = |x| x * 2
let sum = |a, b| a + b

Control Flow

Conditionals

conditionals.sx
// If expression
let status = if count > 0 { "positive" } else { "zero or negative" }

// If-else chain
if score >= 90 {
    "A"
} else if score >= 80 {
    "B"
} else {
    "C"
}

Pattern Matching

matching.sx
// Match expression
match status {
    Status::Active => handle_active(),
    Status::Pending => handle_pending(),
    Status::Error(msg) => handle_error(msg),
    _ => handle_default(),
}

// Match with guards
match value {
    n if n > 100 => "large",
    n if n > 0 => "positive",
    0 => "zero",
    _ => "negative",
}

// Destructuring
match point {
    Point { x: 0, y } => println("On y-axis at {y}"),
    Point { x, y: 0 } => println("On x-axis at {x}"),
    Point { x, y } => println("At ({x}, {y})"),
}

Loops

loops.sx
// For loop
for item in items {
    process(item)
}

// Range iteration
for i in 0..10 {
    println(i)
}

// While loop
while condition {
    do_work()
}

// Loop with break
loop {
    if done { break }
    process()
}

Structs & Enums

Structs

structs.sx
// Define a struct
struct User {
    name: String,
    email: String,
    age: i64,
}

// Create instance
let user = User {
    name: "Alice",
    email: "alice@example.com",
    age: 30,
}

// Access fields
println(user.name)

// Methods via impl
impl User {
    fn display_name(&self) -> String {
        "{self.name} <{self.email}>"
    }
}

Enums

enums.sx
// Simple enum
enum Status {
    Active,
    Pending,
    Completed,
}

// Enum with data
enum Message {
    Text(String),
    Image { url: String, width: i64 },
    Quit,
}

// Generic enum (Result, Option)
enum Option<T> {
    Some(T),
    None,
}

enum Result<T, E> {
    Ok(T),
    Err(E),
}

Specialists

Specialists are cognitive agents with an Anima (memory, beliefs, intentions). They replace the older actor syntax in v0.5.0+.

specialist.sx
specialist Summarizer {
    // Cognitive core configuration
    anima: {
        episodic: { capacity: 500 },
        semantic: { capacity: 2000 },
        beliefs: { revision_threshold: 30 },
    },

    // Internal state
    var processed_count: i64 = 0,

    // Message handler
    receive Summarize(doc: Document) -> Summary {
        // Recall relevant past experiences
        let context = self.anima.recall_for(doc.topic, limit: 5)

        // Perform inference with context
        let summary = infer(doc.content, context: context)

        // Remember this experience
        self.anima.remember(doc.id, summary)
        self.processed_count += 1

        checkpoint()
        summary
    }

    // Regular method
    fn stats(&self) -> Stats {
        Stats { count: self.processed_count }
    }
}

Spawning Specialists

spawning.sx
// Spawn a specialist
let summarizer = spawn Summarizer

// Send message and wait for response
let result = ask(summarizer, Summarize(doc))

// Fire-and-forget message
send(summarizer, Summarize(doc))

// Stop a specialist
stop(summarizer)

Anima

The Anima is the cognitive core of each specialist, providing persistent memory, beliefs, and intentions.

anima.sx
// Memory operations
self.anima.remember(key, value)
self.anima.remember(key, value, type: MemoryType::Semantic)
let memories = self.anima.recall_for(query, limit: 5)
let similar = self.anima.recall_similar(embedding, limit: 10)
self.anima.forget(key)

// Belief management
self.anima.believe("user_preference", value, confidence: 0.85)
let belief = self.anima.beliefs.get("user_preference")
self.anima.revise("belief_key", new_value, evidence: 0.9)

// Check confidence threshold
if self.anima.beliefs.confident("key", threshold: 0.7) {
    // Act on confident belief
}

Anima Configuration

Property                   | Description               | Default
episodic.capacity          | Max episodic memories     | 1000
semantic.capacity          | Max semantic memories     | 5000
beliefs.revision_threshold | % confidence to revise    | 30
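
These properties map onto the anima block shown in specialist.sx above. As an illustrative sketch (the specialist name and values here are hypothetical), overriding the defaults looks like this:

anima_config.sx
specialist Archivist {
    // Defaults from the table above apply to any property left out of this block
    anima: {
        episodic: { capacity: 250 },          // default: 1000
        semantic: { capacity: 1000 },         // default: 5000
        beliefs: { revision_threshold: 40 },  // default: 30
    },
}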

Hives

Hives organize specialists into collaborative groups that share a single SLM and collective memory through HiveMnemonic.

hive.sx
hive DocumentProcessing {
    // Shared SLM for all specialists (4.1 GB total)
    slm: "simplex-cognitive-7b",

    // Shared consciousness (HiveMnemonic)
    mnemonic: {
        episodic: { capacity: 2000 },
        semantic: { capacity: 10000 },
        beliefs: { revision_threshold: 50 },
    },

    // Specialists in this hive
    specialists: [Extractor, Summarizer, Classifier],

    // Routing strategy
    routing: RoutingStrategy::Semantic,
}

HiveMnemonic Operations

mnemonic.sx
// Share knowledge with the hive
hive.mnemonic.learn(insight, confidence: 0.75)

// Query shared knowledge
let shared = hive.mnemonic.recall_for(query, limit: 10)

// Get shared beliefs
let hive_belief = hive.mnemonic.beliefs.get("project_deadline")

// Promote individual memory to shared
hive.mnemonic.promote(self.anima.recall("discovery"))

Inference

The infer() function uses the hive's shared SLM for AI operations.

inference.sx
// Basic inference
let response = infer(prompt)

// With memory context
let memories = self.anima.recall_for(query, limit: 5)
let response = infer(prompt, context: memories)

// Structured extraction
struct Invoice { vendor: String, amount: f64 }
let invoice = infer<Invoice>(document)

// Classification
enum Sentiment { Positive, Negative, Neutral }
let sentiment = infer<Sentiment>(text)

// With options
let response = infer(prompt,
    context: memories,
    system: "You are an analyst.",
    temperature: 0.7,
    max_tokens: 500,
)

Neural Gates

Neural Gates (v0.6.0+) introduce learnable control flow into Simplex. Unlike traditional conditionals, gates are differentiable decision points that can be trained from data and compiled to zero-overhead branches for production inference.

Basic Neural Gate

neural_gate.sx
// Define a learnable decision gate
neural_gate route_request(features: Tensor) -> RouteDecision {
    // Gate learns which branch to take based on input features
    branch fast_path {
        quick_process(features)
    }
    branch detailed_path {
        full_analysis(features)
    }
}

// Use the gate in your code
let result = route_request(input_features)

Compilation Modes

Neural gates behave differently depending on compilation mode:

Mode    | Behavior                                                    | Usage
train   | Gates differentiable, gradients flow through all branches  | sxc --mode train
infer   | Gates frozen, compiles to zero-overhead conditionals        | sxc --mode infer
profile | Track gate decisions with runtime statistics                | sxc --mode profile

Categorical Gates

categorical_gate.sx
// Multi-way classification gate
neural_gate classify_intent(text: String) -> Intent {
    branch question   { Intent::Question }
    branch command    { Intent::Command }
    branch statement  { Intent::Statement }
    branch greeting   { Intent::Greeting }
}

// With confidence threshold
neural_gate detect_anomaly(metrics: Tensor) -> AnomalyResult {
    threshold: 0.85,  // Minimum confidence to commit
    branch normal   { AnomalyResult::Normal }
    branch anomaly  { AnomalyResult::Detected }
    fallback        { AnomalyResult::Uncertain }
}

Gate Contracts

Contracts provide safety guarantees for neural gate behavior:

gate_contracts.sx
neural_gate route_payment(amount: f64, risk: Tensor) -> PaymentRoute {
    // Precondition: input constraints
    requires amount > 0.0
    requires risk.is_valid()

    // Postcondition: output guarantees
    ensures result.is_auditable()
    ensures result.fee >= 0.0

    // Guaranteed fallback if gate fails
    fallback PaymentRoute::ManualReview

    branch instant {
        PaymentRoute::Instant { fee: amount * 0.001 }
    }
    branch standard {
        PaymentRoute::Standard { fee: 0.50 }
    }
    branch review {
        PaymentRoute::ManualReview
    }
}

Gate Training

gate_training.sx
import simplex_learning::{OnlineLearner, StreamingAdam}

// Train gates from labeled data
let learner = OnlineLearner::new(
    optimizer: StreamingAdam { lr: 0.001 }
)

for (input, expected) in training_data {
    let output = route_request(input)
    let loss = cross_entropy(output, expected)
    learner.step(loss)
}

// Export trained gate weights
save_gate_weights("route_request.weights")

Real-Time Learning

Real-Time Learning (v0.7.0+) enables specialists to adapt at runtime without offline training. The simplex-learning library provides streaming optimizers, safety constraints, and federated learning across hives.

OnlineLearner

online_learner.sx
import simplex_learning::{OnlineLearner, StreamingAdam}

specialist AdaptiveClassifier {
    var learner: OnlineLearner,

    fn init() {
        self.learner = OnlineLearner::new(
            optimizer: StreamingAdam { lr: 0.001 },
            window_size: 100,
        )
    }

    receive Classify(input: Tensor) -> Class {
        self.learner.forward(input)
    }

    receive Feedback(input: Tensor, correct: Class) {
        // Learn from user correction in real-time
        let loss = self.learner.compute_loss(input, correct)
        self.learner.step(loss)
    }
}

Streaming Optimizers

Optimizer      | Description                                    | Use Case
StreamingAdam  | Adam with bounded memory for moment estimates  | General-purpose, adaptive learning rate
StreamingSGD   | SGD with momentum, constant memory             | Fast updates, predictable behavior
StreamingAdamW | AdamW with decoupled weight decay              | Prevent catastrophic forgetting

optimizers.sx
import simplex_learning::{StreamingAdam, StreamingSGD, StreamingAdamW}

// Streaming Adam with custom parameters
let adam = StreamingAdam {
    lr: 0.001,
    beta1: 0.9,
    beta2: 0.999,
    epsilon: 1e-8,
}

// SGD with momentum
let sgd = StreamingSGD {
    lr: 0.01,
    momentum: 0.9,
}

// AdamW for long-running adaptation
let adamw = StreamingAdamW {
    lr: 0.001,
    weight_decay: 0.01,
}

Safety Constraints

SafeLearner wraps any learner with runtime safety bounds:

safe_learning.sx
import simplex_learning::{OnlineLearner, SafeLearner, SafeFallback}

// Wrap learner with safety bounds
let safe_learner = SafeLearner::new(
    learner: OnlineLearner::new(optimizer: StreamingAdam { lr: 0.001 }),

    // Maximum parameter change per step
    max_delta: 0.1,

    // Rollback if validation degrades
    validation_threshold: 0.95,

    // Keep checkpoint for recovery
    checkpoint_interval: 100,
)

// Define fallback behavior
let fallback = SafeFallback {
    // Return to last known good state
    on_divergence: FallbackAction::Rollback,

    // Alert on repeated failures
    on_repeated_failure: FallbackAction::Alert,

    // Maximum consecutive failures before halt
    max_failures: 5,
}

Federated Learning

FederatedLearner enables knowledge sharing across hives while preserving privacy:

federated.sx
import simplex_learning::{FederatedLearner, FederatedConfig}

// Configure federated learning across hives
let federated = FederatedLearner::new(
    config: FederatedConfig {
        // Aggregate gradients, not raw data
        aggregation: Aggregation::SecureAverage,

        // Differential privacy budget
        epsilon: 1.0,

        // Minimum participants for aggregation
        min_participants: 3,

        // Sync frequency
        sync_interval: Duration::seconds(60),
    }
)

// Local update
let local_gradients = learner.compute_gradients(batch)

// Share with federation (privacy-preserving)
federated.contribute(local_gradients)

// Receive aggregated updates from other hives
let global_update = federated.receive_aggregate()
learner.apply_update(global_update)

Learning in Specialists

learning_specialist.sx
import simplex_learning::{OnlineLearner, SafeLearner, StreamingAdam}

specialist RecommendationEngine {
    anima: {
        episodic: { capacity: 1000 },
        semantic: { capacity: 5000 },
    },

    var learner: SafeLearner,
    var interaction_count: i64 = 0,

    fn init() {
        self.learner = SafeLearner::new(
            learner: OnlineLearner::new(
                optimizer: StreamingAdam { lr: 0.0001 }
            ),
            max_delta: 0.05,
            validation_threshold: 0.90,
        )
    }

    receive Recommend(user_id: String, context: Context) -> List<Item> {
        // Recall user preferences
        let history = self.anima.recall_for(user_id, limit: 20)

        // Generate recommendations using learned model
        self.learner.forward(context, history)
    }

    receive UserClicked(user_id: String, item: Item) {
        // Positive feedback - learn from interaction
        let loss = self.learner.compute_loss(item, positive: true)
        self.learner.step(loss)

        // Update user memory
        self.anima.remember(user_id, item, type: MemoryType::Episodic)
        self.interaction_count += 1

        checkpoint()
    }

    receive UserDismissed(user_id: String, item: Item) {
        // Negative feedback
        let loss = self.learner.compute_loss(item, positive: false)
        self.learner.step(loss)
    }
}

Error Handling

Option and Result

errors.sx
// Option for nullable values
let maybe: Option<User> = find_user(id)
match maybe {
    Some(user) => greet(user),
    None => println("User not found"),
}

// Result for fallible operations
let result: Result<Data, Error> = load_file(path)
match result {
    Ok(data) => process(data),
    Err(e) => log_error(e),
}

// Error propagation with ?
fn process() -> Result<Output, Error> {
    let data = load()?      // Returns early if Err
    let parsed = parse(data)?
    Ok(transform(parsed))
}

// Unwrap with default
let value = maybe.unwrap_or(default_value)
let value = result.unwrap_or_else(|e| handle(e))

More Resources

For detailed API documentation, see the API Reference. For hands-on learning, try the Tutorials.