Development Setup

This guide helps you set up a development environment for building workflows with Reflow.

Development Environment

  • IDE: Visual Studio Code or RustRover
  • Version Control: Git
  • Package Manager: Cargo for Rust dependencies
  • Terminal: Modern terminal with good Unicode support

VS Code Extensions

For the best development experience with VS Code:

{
  "recommendations": [
    "rust-lang.rust-analyzer",
    "vadimcn.vscode-lldb",
    "serayuzgur.crates",
    "tamasfe.even-better-toml",
    "ms-vscode.vscode-json"
  ]
}

Project Structure

Creating a New Reflow Project

# Create a new Rust project
cargo new my-reflow-app
cd my-reflow-app

# Add Reflow dependencies
cargo add reflow_rt
cargo add tokio --features rt-multi-thread,macros
cargo add serde_json anyhow
A typical project layout looks like this:

my-reflow-app/
├── Cargo.toml
├── src/
│   ├── main.rs
│   ├── actors/
│   │   ├── mod.rs
│   │   └── custom_actor.rs
│   ├── workflows/
│   │   ├── mod.rs
│   │   └── data_pipeline.rs
│   └── scripts/
│       ├── process.js
│       └── transform.py
├── config/
│   └── reflow.toml
├── tests/
│   └── integration_tests.rs
└── examples/
    └── basic_workflow.rs

Cargo.toml Configuration

[package]
name = "my-reflow-app"
version = "0.1.0"
edition = "2021"

[dependencies]
reflow_rt = "0.1"
tokio = { version = "1", features = ["rt-multi-thread", "macros"] }
serde_json = "1"
anyhow = "1"

[dev-dependencies]
tokio-test = "0.4"

[[example]]
name = "basic_workflow"
path = "examples/basic_workflow.rs"

Development Workflow

1. Setting Up the Main Application

Create src/main.rs:

use reflow_rt::prelude::*;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut graph = Graph::new("development", false, None);
    graph.add_node("tap", "tpl_passthrough", None);

    let network = Network::with_graph(NetworkConfig::default(), &graph);
    let _ = network;

    Ok(())
}

2. Creating Custom Actors

Create src/actors/mod.rs:

pub mod custom_actor;

pub use custom_actor::CustomActor;

Create src/actors/custom_actor.rs:

use reflow_rt::actor_runtime::{Actor, ActorBehavior, ActorContext, Port};
use reflow_rt::actor_runtime::message::Message;
use std::collections::HashMap;

pub struct CustomActor {
    inports: Port,
    outports: Port,
}

impl CustomActor {
    pub fn new() -> Self {
        Self {
            inports: flume::unbounded(),
            outports: flume::unbounded(),
        }
    }
}

impl Actor for CustomActor {
    fn get_behavior(&self) -> ActorBehavior {
        Box::new(|context: ActorContext| {
            Box::pin(async move {
                let payload = context.get_payload();
                
                // Your processing logic here
                let result = HashMap::from([
                    ("output".to_string(), Message::String("processed".to_string()))
                ]);
                
                Ok(result)
            })
        })
    }
    
    fn get_inports(&self) -> Port {
        self.inports.clone()
    }
    
    fn get_outports(&self) -> Port {
        self.outports.clone()
    }
    
    fn create_process(&self) -> std::pin::Pin<Box<dyn std::future::Future<Output = ()> + 'static + Send>> {
        // TODO: replace this stub with the actor's actual process loop
        todo!("Implement process creation")
    }
}

3. Organizing Workflows

Create src/workflows/mod.rs:

pub mod data_pipeline;

pub use data_pipeline::create_data_pipeline;

Create src/workflows/data_pipeline.rs:

use reflow_rt::prelude::*;
use crate::actors::CustomActor;

pub async fn create_data_pipeline() -> Result<Network, Box<dyn std::error::Error>> {
    let mut network = Network::new();
    
    // Create actors
    let source_actor = CustomActor::new();
    let transform_actor = CustomActor::new();
    let sink_actor = CustomActor::new();
    
    // Add actors to network
    network.add_actor("source", Box::new(source_actor)).await?;
    network.add_actor("transform", Box::new(transform_actor)).await?;
    network.add_actor("sink", Box::new(sink_actor)).await?;
    
    // Connect actors
    network.connect("source", "output", "transform", "input").await?;
    network.connect("transform", "output", "sink", "input").await?;
    
    Ok(network)
}

Testing Setup

Unit Tests

Add unit tests to the existing src/actors/custom_actor.rs:

#[cfg(test)]
mod tests {
    use super::*;
    use tokio_test;
    
    #[tokio::test]
    async fn test_custom_actor() {
        let actor = CustomActor::new();
        let behavior = actor.get_behavior();
        
        // Create test context
        let payload = HashMap::from([
            ("input".to_string(), Message::String("test".to_string()))
        ]);
        
        // Test behavior
        // Note: You'll need to create proper ActorContext for testing
        let result = behavior(/* test context */).await;
        assert!(result.is_ok());
    }
}

Integration Tests

Create tests/integration_tests.rs:

use my_reflow_app::workflows::create_data_pipeline;

#[tokio::test]
async fn test_data_pipeline() {
    let network = create_data_pipeline().await.unwrap();
    
    // Test the complete workflow
    // Send test data and verify output
}

Configuration Management

Environment Configuration

Create config/reflow.toml:

[development]
log_level = "debug"
thread_pool_size = 4

[production]
log_level = "info"
thread_pool_size = 8

[scripting]
deno_permissions = ["--allow-net", "--allow-read"]
python_interpreter = "python3"

[networking]
bind_address = "127.0.0.1:8080"
enable_metrics = true

Loading Configuration

use serde::{Deserialize, Serialize};
use std::fs;

#[derive(Debug, Deserialize, Serialize)]
struct Config {
    development: Option<EnvConfig>,
    production: Option<EnvConfig>,
    scripting: Option<ScriptingConfig>,
    networking: Option<NetworkingConfig>,
}

#[derive(Debug, Deserialize, Serialize)]
struct EnvConfig {
    log_level: String,
    thread_pool_size: usize,
}

fn load_config() -> Result<Config, Box<dyn std::error::Error>> {
    let config_str = fs::read_to_string("config/reflow.toml")?;
    let config: Config = toml::from_str(&config_str)?;
    Ok(config)
}

Note: this example requires two additional dependencies not listed earlier — add them with cargo add serde --features derive and cargo add toml. Define ScriptingConfig and NetworkingConfig structs along the same lines as EnvConfig to match the config/reflow.toml sections above.

Development Scripts

Makefile

Create a Makefile for common tasks:

.PHONY: build test run clean docs

build:
	cargo build

test:
	cargo test

run:
	cargo run

clean:
	cargo clean

docs:
	cargo doc --open

check:
	cargo check
	cargo clippy -- -D warnings
	cargo fmt -- --check

dev:
	cargo watch -x run

install-tools:
	cargo install cargo-watch
	cargo install cargo-expand

Development Commands

# Development workflow
make build          # Build the project
make test           # Run tests
make check          # Run linting and formatting checks
make dev            # Run with auto-reload on changes

# Documentation
make docs           # Generate and open documentation
cargo doc --document-private-items  # Include private items

Debugging

Logging Setup

use tracing::{info, warn, error, debug};
use tracing_subscriber;

// In main.rs
fn init_logging() {
    tracing_subscriber::fmt()
        .with_max_level(tracing::Level::DEBUG)
        .init();
}

// In your actors
debug!("Processing message: {:?}", message);
info!("Actor started successfully");
warn!("High memory usage detected");
error!("Failed to process message: {}", error);

Debug Configuration

Add to Cargo.toml:

[profile.dev]
debug = true
debug-assertions = true
overflow-checks = true

[dependencies]
tracing = "0.1"
tracing-subscriber = "0.3"

Using Debugger

For VS Code, create .vscode/launch.json:

{
    "version": "0.2.0",
    "configurations": [
        {
            "type": "lldb",
            "request": "launch",
            "name": "Debug Reflow App",
            "cargo": {
                "args": ["build", "--bin=my-reflow-app"],
                "filter": {
                    "name": "my-reflow-app",
                    "kind": "bin"
                }
            },
            "args": [],
            "cwd": "${workspaceFolder}"
        }
    ]
}

Performance Profiling

Basic Profiling

use std::time::Instant;

// Time critical sections
let start = Instant::now();
// ... your code
let duration = start.elapsed();
println!("Time elapsed: {:?}", duration);

Advanced Profiling Tools

# Install profiling tools
cargo install cargo-profiler
cargo install flamegraph

# Generate flame graphs
cargo flamegraph --bin my-reflow-app

# Memory profiling with valgrind (Linux)
cargo build --release
valgrind --tool=massif ./target/release/my-reflow-app

Continuous Integration

GitHub Actions

Create .github/workflows/ci.yml:

name: CI

on: [push, pull_request]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v4
    - uses: dtolnay/rust-toolchain@stable
    - name: Build
      run: cargo build --verbose
    - name: Run tests
      run: cargo test --verbose
    - name: Check formatting
      run: cargo fmt -- --check
    - name: Run clippy
      run: cargo clippy -- -D warnings

Next Steps

Now that your development environment is set up:

  1. Create your first workflow: First Workflow
  2. Learn about actors: Creating Actors
  3. Explore scripting: Deno Runtime
  4. See examples: Examples

Resources