🦀 Serverless Architecture in TuskLang with Rust
☁️ Serverless Foundation
Serverless architecture with TuskLang and Rust is a strong combination for building scalable, cost-effective, and maintainable cloud-native applications. This guide covers AWS Lambda, API Gateway, DynamoDB and S3 storage, SQS/SNS messaging, and modern event-driven serverless patterns.
🏗️ Serverless Architecture
Serverless Principles
[serverless_principles]
event_driven: true
stateless: true
auto_scaling: true
pay_per_use: true

[architecture_patterns]
function_as_service: true
backend_as_service: true
event_driven_processing: true
microservices: true
Serverless Components
[serverless_components]
compute: "lambda_functions"
storage: "s3_dynamodb"
messaging: "sqs_sns"
api: "api_gateway"
🔧 AWS Lambda Integration
Lambda Function Configuration
[lambda_config]
runtime: "provided.al2"
memory_size: 512
timeout: 30
environment: "tusk_config"[lambda_function]
name: "tusk-processor"
handler: "bootstrap"
runtime: "provided.al2"
memory_size: 512
timeout: 30
environment:
RUST_LOG: "info"
TUSK_CONFIG: "lambda.tusk"
Lambda Function Implementation
[lambda_implementation]
use lambda_runtime::{service_fn, Error, LambdaEvent};
use serde_json::{json, Value};

#[tokio::main]
async fn main() -> Result<(), Error> {
    // Load the TuskLang configuration once, at cold start
    let config = tusk_config::load("lambda.tusk")?;
    let config = &config;

    lambda_runtime::run(service_fn(move |event: LambdaEvent<Value>| async move {
        let (payload, _context) = event.into_parts();
        // Process the event using the TuskLang configuration
        let result = process_event(config, &payload).await?;
        Ok::<Value, Error>(json!({
            "statusCode": 200,
            "body": result
        }))
    }))
    .await?;
    Ok(())
}

async fn process_event(config: &Config, payload: &Value) -> Result<Value, Error> {
    // Dispatch on the event type using the [processor] section of the configuration
    let processor_config = &config.processor;
    match payload["type"].as_str() {
        Some("data_processing") => {
            let result = process_data(&processor_config.data_processing, payload).await?;
            Ok(json!({ "result": result }))
        }
        Some("ml_inference") => {
            let result = run_ml_inference(&processor_config.ml_inference, payload).await?;
            Ok(json!({ "prediction": result }))
        }
        _ => Err(Error::from("Unknown event type")),
    }
}
TuskLang Lambda Configuration
[lambda_tusk_config]
# lambda.tusk
[lambda]
function_name: "@env('AWS_LAMBDA_FUNCTION_NAME')"
memory_size: 512
timeout: 30
environment: "production"

[processor]
data_processing:
  batch_size: 100
  timeout: "10s"
  retry_attempts: 3
ml_inference:
  model_path: "@file.read('models/model.onnx')"
  batch_size: 32
  gpu_enabled: false
[storage]
s3_bucket: "@env('S3_BUCKET')"
dynamodb_table: "@env('DYNAMODB_TABLE')"
[monitoring]
cloudwatch_logs: true
xray_tracing: true
custom_metrics: true
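The handler code above deserializes this file into a plain Rust struct. The exact `tusk_config` API is not shown in this guide, but the shape it would map to might look like the following sketch; the struct and field names simply mirror the config sections the handler reads, and the serde derives are an assumption for illustration.

[config_struct_sketch]
use serde::Deserialize;

// Hypothetical Rust mapping for the parts of lambda.tusk that the handler reads
#[derive(Deserialize)]
pub struct Config {
    pub processor: ProcessorConfig,
    pub storage: StorageConfig,
}

#[derive(Deserialize)]
pub struct ProcessorConfig {
    pub data_processing: DataProcessingConfig,
    pub ml_inference: MlInferenceConfig,
}

#[derive(Deserialize)]
pub struct DataProcessingConfig {
    pub batch_size: usize,
    pub timeout: String,
    pub retry_attempts: u32,
}

#[derive(Deserialize)]
pub struct MlInferenceConfig {
    pub model_path: String,
    pub batch_size: usize,
    pub gpu_enabled: bool,
}

#[derive(Deserialize)]
pub struct StorageConfig {
    pub s3_bucket: String,
    pub dynamodb_table: String,
}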
🌐 API Gateway Integration
API Gateway Configuration
[api_gateway_config]
rest_api: true
websocket: true
authorization: "jwt"
rate_limiting: true

[gateway_routes]
get_users: "GET /users"
create_user: "POST /users"
update_user: "PUT /users/{id}"
delete_user: "DELETE /users/{id}"
[gateway_integration]
lambda_integration: true
cors_enabled: true
request_validation: true
response_mapping: true
API Gateway Handler
[api_gateway_handler]
use aws_lambda_events::apigw::{ApiGatewayProxyRequest, ApiGatewayProxyResponse};
use aws_lambda_events::encodings::Body;
use http::HeaderMap;
use lambda_runtime::{Error, LambdaEvent};

async fn api_handler(
    event: LambdaEvent<ApiGatewayProxyRequest>,
) -> Result<ApiGatewayProxyResponse, Error> {
    let config = tusk_config::load("api.tusk")?;
    let request = event.payload;

    // Route on the request path; `path` is optional in the API Gateway event
    let response = match request.path.as_deref() {
        Some("/users") => handle_users(&config, &request).await?,
        Some(path) if path.starts_with("/users/") => handle_user_by_id(&config, &request).await?,
        _ => ApiGatewayProxyResponse {
            status_code: 404,
            headers: HeaderMap::new(),
            body: Some(Body::Text("Not Found".to_string())),
            ..Default::default()
        },
    };
    Ok(response)
}

async fn handle_users(
    config: &Config,
    request: &ApiGatewayProxyRequest,
) -> Result<ApiGatewayProxyResponse, Error> {
    match request.http_method.as_str() {
        "GET" => {
            let users = get_users(&config.database).await?;
            Ok(ApiGatewayProxyResponse {
                status_code: 200,
                headers: HeaderMap::new(),
                body: Some(Body::Text(serde_json::to_string(&users)?)),
                ..Default::default()
            })
        }
        "POST" => {
            let body = request.body.as_deref().unwrap_or_default();
            let user_data: UserData = serde_json::from_str(body)?;
            let user = create_user(&config.database, &user_data).await?;
            Ok(ApiGatewayProxyResponse {
                status_code: 201,
                headers: HeaderMap::new(),
                body: Some(Body::Text(serde_json::to_string(&user)?)),
                ..Default::default()
            })
        }
        _ => Err(Error::from("Method not allowed")),
    }
}
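A minimal entry point for this handler might look like the sketch below; it simply wires `api_handler` into the Lambda runtime, assuming the handler and `tusk_config` usage shown above.

[api_gateway_entrypoint]
use lambda_runtime::{service_fn, Error};

#[tokio::main]
async fn main() -> Result<(), Error> {
    // Register the API Gateway handler with the Lambda runtime
    lambda_runtime::run(service_fn(api_handler)).await
}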
🗄️ Database Integration
DynamoDB Integration
[dynamodb_integration]
client: "aws_sdk_dynamodb"
table_design: "single_table"
query_optimization: true

[dynamodb_config]
table_name: "@env('DYNAMODB_TABLE')"
region: "@env('AWS_REGION')"
endpoint: "@env('DYNAMODB_ENDPOINT')"
[dynamodb_operations]
use lambda_runtime::Error;
use serde_json::Value;

pub struct DynamoDBClient {
    client: aws_sdk_dynamodb::Client,
    table_name: String,
}
impl DynamoDBClient {
pub async fn get_item(&self, key: &str) -> Result<Option<Value>, Error> {
let result = self.client
.get_item()
.table_name(&self.table_name)
.key("id", aws_sdk_dynamodb::types::AttributeValue::S(key.to_string()))
.send()
.await?;
Ok(result.item.map(|item| {
serde_dynamo::from_item(item).unwrap()
}))
}
pub async fn put_item(&self, item: &Value) -> Result<(), Error> {
let item = serde_dynamo::to_item(item)?;
self.client
.put_item()
.table_name(&self.table_name)
.set_item(Some(item))
.send()
.await?;
Ok(())
}
pub async fn query(&self, pk: &str, sk: Option<&str>) -> Result<Vec<Value>, Error> {
let mut query = self.client
.query()
.table_name(&self.table_name)
.key_condition_expression("pk = :pk")
.expression_attribute_values(":pk", aws_sdk_dynamodb::types::AttributeValue::S(pk.to_string()));
if let Some(sk_value) = sk {
query = query
.key_condition_expression("pk = :pk AND begins_with(sk, :sk)")
.expression_attribute_values(":sk", aws_sdk_dynamodb::types::AttributeValue::S(sk_value.to_string()));
}
let result = query.send().await?;
let items: Vec<Value> = result.items
.unwrap_or_default()
.into_iter()
.map(|item| serde_dynamo::from_item(item).unwrap())
.collect();
Ok(items)
}
}
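A constructor for this client might look like the following sketch. The `from_env` helper is hypothetical: it assumes the `aws-config` crate for the default credential chain and reads the table name from the DYNAMODB_TABLE variable referenced in the [dynamodb_config] section above.

[dynamodb_client_construction]
impl DynamoDBClient {
    /// Build a client from the ambient AWS configuration.
    /// Assumes DYNAMODB_TABLE is set, as in the [dynamodb_config] section.
    pub async fn from_env() -> Result<Self, Error> {
        let aws_config = aws_config::load_from_env().await;
        Ok(Self {
            client: aws_sdk_dynamodb::Client::new(&aws_config),
            table_name: std::env::var("DYNAMODB_TABLE")?,
        })
    }
}

With this in place, a handler can call DynamoDBClient::from_env() once per cold start and reuse the client across invocations.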
S3 Integration
[s3_integration]
client: "aws_sdk_s3"
bucket_operations: true
file_processing: true

[s3_config]
bucket_name: "@env('S3_BUCKET')"
region: "@env('AWS_REGION')"
endpoint: "@env('S3_ENDPOINT')"
[s3_operations]
pub struct S3Client {
client: aws_sdk_s3::Client,
bucket_name: String,
}
impl S3Client {
pub async fn upload_file(&self, key: &str, data: &[u8]) -> Result<(), Error> {
self.client
.put_object()
.bucket(&self.bucket_name)
.key(key)
.body(aws_sdk_s3::types::ByteStream::from(data.to_vec()))
.send()
.await?;
Ok(())
}
pub async fn download_file(&self, key: &str) -> Result<Vec<u8>, Error> {
let result = self.client
.get_object()
.bucket(&self.bucket_name)
.key(key)
.send()
.await?;
let data = result.body.collect().await?.into_bytes();
Ok(data.to_vec())
}
pub async fn list_files(&self, prefix: &str) -> Result<Vec<String>, Error> {
let result = self.client
.list_objects_v2()
.bucket(&self.bucket_name)
.prefix(prefix)
.send()
.await?;
let keys: Vec<String> = result.contents
.unwrap_or_default()
.into_iter()
.filter_map(|obj| obj.key)
.collect();
Ok(keys)
}
}
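A typical use of this client is archiving raw event payloads for later reprocessing. The sketch below is a hypothetical helper that assumes the S3Client above, the same Error alias used elsewhere in this guide, and the uuid crate (already used in the X-Ray section) for unique keys.

[s3_usage_example]
async fn archive_payload(s3: &S3Client, payload: &serde_json::Value) -> Result<(), lambda_runtime::Error> {
    // Persist the raw event payload under a unique key for auditing or replay
    let key = format!("events/{}.json", uuid::Uuid::new_v4());
    s3.upload_file(&key, serde_json::to_string(payload)?.as_bytes()).await?;
    Ok(())
}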
🔄 Event-Driven Processing
SQS Integration
[sqs_integration]
queue_operations: true
message_processing: true
dead_letter_queue: true

[sqs_config]
queue_url: "@env('SQS_QUEUE_URL')"
region: "@env('AWS_REGION')"
visibility_timeout: 30
[sqs_operations]
use aws_sdk_sqs::types::Message;
use lambda_runtime::Error;
use serde_json::Value;

pub struct SQSClient {
    client: aws_sdk_sqs::Client,
    queue_url: String,
}
impl SQSClient {
pub async fn send_message(&self, message: &Value) -> Result<(), Error> {
let message_body = serde_json::to_string(message)?;
self.client
.send_message()
.queue_url(&self.queue_url)
.message_body(message_body)
.send()
.await?;
Ok(())
}
pub async fn receive_messages(&self, max_messages: i32) -> Result<Vec<Message>, Error> {
let result = self.client
.receive_message()
.queue_url(&self.queue_url)
.max_number_of_messages(max_messages)
.send()
.await?;
Ok(result.messages.unwrap_or_default())
}
pub async fn delete_message(&self, receipt_handle: &str) -> Result<(), Error> {
self.client
.delete_message()
.queue_url(&self.queue_url)
.receipt_handle(receipt_handle)
.send()
.await?;
Ok(())
}
}
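A worker that drains the queue with this client could look like the sketch below. It receives batches, processes each message, and deletes a message only after it was handled successfully so failed messages become visible again; `handle_message` is a hypothetical processing step.

[sqs_worker_loop]
async fn drain_queue(sqs: &SQSClient) -> Result<(), Error> {
    loop {
        // Receive up to 10 messages per round trip
        let messages = sqs.receive_messages(10).await?;
        if messages.is_empty() {
            break;
        }
        for message in messages {
            if let Some(body) = message.body() {
                let payload: Value = serde_json::from_str(body)?;
                handle_message(&payload).await?; // hypothetical processing step
            }
            // Delete only after successful processing so failures stay visible
            if let Some(receipt) = message.receipt_handle() {
                sqs.delete_message(receipt).await?;
            }
        }
    }
    Ok(())
}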
SNS Integration
[sns_integration]
topic_operations: true
message_publishing: true
subscription_management: true[sns_config]
topic_arn: "@env('SNS_TOPIC_ARN')"
region: "@env('AWS_REGION')"
[sns_operations]
use lambda_runtime::Error;
use serde_json::Value;
use std::collections::HashMap;

pub struct SNSClient {
    client: aws_sdk_sns::Client,
    topic_arn: String,
}
impl SNSClient {
pub async fn publish_message(&self, message: &Value) -> Result<(), Error> {
let message_body = serde_json::to_string(message)?;
self.client
.publish()
.topic_arn(&self.topic_arn)
.message(message_body)
.send()
.await?;
Ok(())
}
    pub async fn publish_with_attributes(&self, message: &Value, attributes: HashMap<String, String>) -> Result<(), Error> {
        let message_body = serde_json::to_string(message)?;
        let mut publish_request = self.client
            .publish()
            .topic_arn(&self.topic_arn)
            .message(message_body);
        for (key, value) in attributes {
            // SNS requires a DataType on every message attribute
            let attribute = aws_sdk_sns::types::MessageAttributeValue::builder()
                .data_type("String")
                .string_value(value)
                .build()?;
            publish_request = publish_request.message_attributes(key, attribute);
        }
        publish_request.send().await?;
        Ok(())
    }
}
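Message attributes are most useful for subscription filtering: subscribers can match on an attribute without parsing the body. The sketch below is a hypothetical helper that publishes a completion event with such an attribute, assuming the SNSClient and imports shown above.

[sns_usage_example]
async fn notify_completion(sns: &SNSClient, result: &Value) -> Result<(), Error> {
    // The event_type attribute lets SNS subscription filters route this message
    let mut attributes = HashMap::new();
    attributes.insert("event_type".to_string(), "processing_complete".to_string());
    sns.publish_with_attributes(result, attributes).await
}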
🔒 Security Implementation
IAM Integration
[iam_integration]
role_assumption: true
permission_management: true
security_best_practices: true

[iam_config]
role_arn: "@env('LAMBDA_ROLE_ARN')"
session_duration: 3600
permissions_boundary: true
[iam_operations]
use lambda_runtime::Error;

// Temporary credentials returned by STS AssumeRole
pub struct Credentials {
    pub access_key_id: String,
    pub secret_access_key: String,
    pub session_token: String,
}

pub struct IAMClient {
    client: aws_sdk_sts::Client,
}

impl IAMClient {
    pub async fn assume_role(&self, role_arn: &str, session_name: &str) -> Result<Credentials, Error> {
        let result = self.client
            .assume_role()
            .role_arn(role_arn)
            .role_session_name(session_name)
            .duration_seconds(3600)
            .send()
            .await?;
        // STS returns credentials on success; treat a missing value as an error.
        // Field types follow the current aws-sdk-sts, where these are plain strings.
        let credentials = result.credentials.ok_or_else(|| Error::from("STS returned no credentials"))?;
        Ok(Credentials {
            access_key_id: credentials.access_key_id,
            secret_access_key: credentials.secret_access_key,
            session_token: credentials.session_token,
        })
    }
}
KMS Integration
[kms_integration]
encryption: true
decryption: true
key_management: true

[kms_config]
key_id: "@env('KMS_KEY_ID')"
region: "@env('AWS_REGION')"
[kms_operations]
pub struct KMSClient {
client: aws_sdk_kms::Client,
key_id: String,
}
impl KMSClient {
pub async fn encrypt(&self, plaintext: &[u8]) -> Result<Vec<u8>, Error> {
let result = self.client
.encrypt()
.key_id(&self.key_id)
.plaintext(aws_sdk_kms::types::Blob::new(plaintext))
.send()
.await?;
Ok(result.ciphertext_blob.unwrap().into_inner())
}
pub async fn decrypt(&self, ciphertext: &[u8]) -> Result<Vec<u8>, Error> {
let result = self.client
.decrypt()
.ciphertext_blob(aws_sdk_kms::types::Blob::new(ciphertext))
.send()
.await?;
Ok(result.plaintext.unwrap().into_inner())
}
}
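A common pattern is to encrypt a secret under the configured key before it leaves the function and decrypt it on the way back in. The round-trip sketch below uses only the KMSClient shown above; the helper names are illustrative.

[kms_usage_example]
async fn encrypt_secret(kms: &KMSClient, secret: &str) -> Result<Vec<u8>, lambda_runtime::Error> {
    // Encrypt the secret under the configured KMS key before persisting it
    kms.encrypt(secret.as_bytes()).await
}

async fn decrypt_secret(kms: &KMSClient, ciphertext: &[u8]) -> Result<String, lambda_runtime::Error> {
    let plaintext = kms.decrypt(ciphertext).await?;
    Ok(String::from_utf8(plaintext)?)
}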
📊 Monitoring & Observability
CloudWatch Integration
[cloudwatch_integration]
metrics: true
logs: true
alarms: true

[cloudwatch_config]
namespace: "@env('CLOUDWATCH_NAMESPACE')"
region: "@env('AWS_REGION')"
[cloudwatch_operations]
pub struct CloudWatchClient {
client: aws_sdk_cloudwatch::Client,
namespace: String,
}
impl CloudWatchClient {
pub async fn put_metric(&self, metric_name: &str, value: f64, unit: &str) -> Result<(), Error> {
self.client
.put_metric_data()
.namespace(&self.namespace)
.metric_data(
aws_sdk_cloudwatch::types::MetricDatum::builder()
.metric_name(metric_name)
.value(value)
.unit(aws_sdk_cloudwatch::types::StandardUnit::from(unit))
.build()
)
.send()
.await?;
Ok(())
}
pub async fn put_custom_metric(&self, metric_name: &str, value: f64, dimensions: Vec<(String, String)>) -> Result<(), Error> {
let mut metric_datum = aws_sdk_cloudwatch::types::MetricDatum::builder()
.metric_name(metric_name)
.value(value);
for (name, value) in dimensions {
metric_datum = metric_datum.dimensions(
aws_sdk_cloudwatch::types::Dimension::builder()
.name(name)
.value(value)
.build()
);
}
self.client
.put_metric_data()
.namespace(&self.namespace)
.metric_data(metric_datum.build())
.send()
.await?;
Ok(())
}
}
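Custom metrics are most valuable when they carry dimensions you can slice on. The hypothetical helper below records a per-event-type latency metric using the CloudWatchClient shown above and the namespace from the [cloudwatch_config] section.

[cloudwatch_usage_example]
async fn record_invocation(cw: &CloudWatchClient, duration_ms: f64, event_type: &str) -> Result<(), lambda_runtime::Error> {
    // Emit a latency metric tagged with the event type so it can be graphed per workload
    cw.put_custom_metric(
        "ProcessingDurationMs",
        duration_ms,
        vec![("EventType".to_string(), event_type.to_string())],
    )
    .await
}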
X-Ray Integration
[xray_integration]
distributed_tracing: true
segment_management: true
subsegment_creation: true

[xray_config]
service_name: "@env('XRAY_SERVICE_NAME')"
region: "@env('AWS_REGION')"
[xray_operations]
use uuid::Uuid;

pub struct XRayClient {
    client: aws_sdk_xray::Client,
    service_name: String,
}

impl XRayClient {
    pub async fn start_segment(&self, name: &str) -> Result<String, Error> {
        let segment_id = Uuid::new_v4().to_string();
        // Placeholder: a full implementation would build a segment document for `name`
        // and send it via self.client.put_trace_segments()
        Ok(segment_id)
    }
    pub async fn end_segment(&self, segment_id: &str) -> Result<(), Error> {
        // Placeholder: close the segment document and flush it to X-Ray
        Ok(())
    }
    pub async fn add_subsegment(&self, parent_id: &str, name: &str) -> Result<String, Error> {
        let subsegment_id = Uuid::new_v4().to_string();
        // Placeholder: attach a subsegment to `parent_id` before sending
        Ok(subsegment_id)
    }
}
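One way to use this client is to wrap a unit of work in a segment so it appears as a span in the trace. The sketch below assumes the `process_event` handler and `Config` type from the Lambda section earlier in this guide.

[xray_usage_example]
async fn traced_process(xray: &XRayClient, config: &Config, payload: &Value) -> Result<Value, Error> {
    // Open a segment around the processing step so it shows up in the trace
    let segment_id = xray.start_segment("process_event").await?;
    let result = process_event(config, payload).await;
    // Close the segment whether or not processing succeeded
    xray.end_segment(&segment_id).await?;
    result
}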
🚀 Deployment & CI/CD
SAM Template
[sam_template]
template_format: "yaml"
functions: "lambda_functions"
resources: "aws_resources"[sam_config]
AWSTemplateFormatVersion: "2010-09-09"
Transform: "AWS::Serverless-2016-10-31"
Resources:
TuskProcessorFunction:
Type: AWS::Serverless::Function
Properties:
CodeUri: ./target/lambda/release/
Handler: bootstrap
Runtime: provided.al2
MemorySize: 512
Timeout: 30
Environment:
Variables:
RUST_LOG: info
TUSK_CONFIG: lambda.tusk
Events:
ApiEvent:
Type: Api
Properties:
Path: /process
Method: post
SQSEvent:
Type: SQS
Properties:
Queue: !GetAtt ProcessingQueue.Arn
BatchSize: 10
GitHub Actions
[github_actions]
workflow: "deploy_lambda"
build: "rust_build"
deploy: "sam_deploy"[workflow_config]
name: Deploy Lambda Functions
on:
push:
branches: [main]
jobs:
build-and-deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Setup Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
target: x86_64-unknown-linux-musl
- name: Build Lambda
run: |
cargo build --release --target x86_64-unknown-linux-musl
cp target/x86_64-unknown-linux-musl/release/tusk-processor bootstrap
zip -j tusk-processor.zip bootstrap
- name: Deploy with SAM
run: |
sam build
sam deploy --no-confirm-changeset
🎯 Best Practices
1. Function Design
- Keep functions small and focused
- Use appropriate memory and timeout settings
- Implement proper error handling
- Use environment variables for configuration

2. Performance
- Optimize cold start times
- Use connection pooling
- Implement caching strategies
- Monitor function performance

3. Security
- Use least privilege IAM roles
- Encrypt sensitive data
- Implement proper authentication
- Regular security audits

4. Monitoring
- Set up comprehensive logging
- Use CloudWatch metrics
- Implement distributed tracing
- Set up proper alerting

5. Cost Optimization
- Monitor function execution times
- Use appropriate memory allocation
- Implement efficient data processing
- Regular cost reviews

Serverless architecture with TuskLang and Rust provides a powerful foundation for building scalable, cost-effective, and maintainable cloud-native applications that automatically scale with demand.