SDK Examples
Code examples for integrating with the Fula API using various languages and tools.
Rust Client SDK
The official Rust client provides type-safe access to all Fula API operations with built-in encryption support.
Installation
Add to your Cargo.toml:
Features
- Type-safe - Compile-time guarantees
- Async/await - Built on Tokio
- Encryption - HPKE client-side encryption
- Streaming - Efficient large file handling
[dependencies]
fula-client = "0.1"
tokio = { version = "1", features = ["full"] }
use fula_client::{FulaClient, ClientConfig};
#[tokio::main]
async fn main() -> Result<(), Box> {
// Create client
let config = ClientConfig::new("http://localhost:9000")
.with_token("your-auth-token");
let client = FulaClient::new(config).await?;
// Create bucket
client.create_bucket("my-bucket").await?;
// Upload object
let data = b"Hello, Fula!";
let etag = client.put_object("my-bucket", "hello.txt", data).await?;
println!("Uploaded with ETag: {}", etag);
// Download object
let content = client.get_object("my-bucket", "hello.txt").await?;
println!("Content: {}", String::from_utf8_lossy(&content));
// List objects
let objects = client.list_objects("my-bucket", None).await?;
for obj in objects {
println!(" {} ({} bytes)", obj.key, obj.size);
}
// Delete object
client.delete_object("my-bucket", "hello.txt").await?;
Ok(())
}
Client-Side Encryption
Fula supports transparent client-side encryption using HPKE (Hybrid Public Key Encryption).
How It Works
- Generate or load a key pair
- Data is encrypted locally before upload
- Only you can decrypt the data
- Server never sees plaintext
Encryption Features
- HPKE - RFC 9180 compliant
- BLAKE3 - Fast, secure hashing
- Bao - Verified streaming
- X25519 - Elliptic curve DH
use fula_client::{FulaClient, ClientConfig, EncryptionConfig};
use fula_crypto::keys::KekKeyPair;
#[tokio::main]
async fn main() -> Result<(), Box> {
// Generate encryption keys
let keypair = KekKeyPair::generate();
// Configure client with encryption
let config = ClientConfig::new("http://localhost:9000")
.with_token("your-auth-token")
.with_encryption(EncryptionConfig::new(keypair));
let client = FulaClient::new(config).await?;
// Upload encrypted data
let sensitive_data = b"This is encrypted before upload";
client.put_object_encrypted(
"secure-bucket",
"secret.txt",
sensitive_data
).await?;
// Download and decrypt
let decrypted = client.get_object_decrypted(
"secure-bucket",
"secret.txt"
).await?;
assert_eq!(sensitive_data.as_slice(), decrypted.as_slice());
println!("Decrypted: {}", String::from_utf8_lossy(&decrypted));
Ok(())
}
use fula_crypto::keys::{KekKeyPair, KeyManager};
// Generate new key pair
let keypair = KekKeyPair::generate();
// Export for backup (encrypt this!)
let private_bytes = keypair.secret_key().to_bytes();
let public_bytes = keypair.public_key().to_bytes();
// Restore from bytes
let restored = KekKeyPair::from_bytes(&private_bytes)?;
// Key manager for multiple keys
let mut manager = KeyManager::new(keypair);
manager.add_recipient("alice", alice_public_key);
manager.add_recipient("bob", bob_public_key);
Metadata Privacy
Protect not just your file content, but also file names, sizes, and timestamps from the server.
What's Protected
- File Names - Server sees obfuscated hash, not "tax_returns_2024.pdf"
- File Sizes - Server sees ciphertext size, not original
- Content Types - Server sees "application/octet-stream"
- Timestamps - Encrypted in private metadata
- User Metadata - All custom fields encrypted
Obfuscation Modes
- FlatNamespace (Default) - Complete structure hiding, maximum privacy
- DeterministicHash - Same path → same key
- RandomUuid - Random key per upload
- PreserveStructure - Keep folders, hash filenames
use fula_client::{EncryptedClient, EncryptionConfig, KeyObfuscation, Config};
// FlatNamespace (maximum privacy) is ENABLED by default
let encryption = EncryptionConfig::new();
// Or customize the obfuscation mode
let encryption = EncryptionConfig::new()
.with_obfuscation_mode(KeyObfuscation::PreserveStructure);
// Disable if not needed (not recommended)
let encryption = EncryptionConfig::new_without_privacy();
let config = Config::new("http://localhost:9000").with_token("token");
let client = EncryptedClient::new(config, encryption)?;
// Upload - server sees: "e/a7c3f9b2e8d14a6f"
// You use: "/finances/tax_2024.pdf"
client.put_object_encrypted(bucket, "/finances/tax_2024.pdf", data).await?;
// Retrieve using original path
let data = client.get_object_decrypted(bucket, "/finances/tax_2024.pdf").await?;
// SERVER'S VIEW (Storage Node):
// ┌─────────────────────────────────────────────────┐
// │ Key:  e/a7c3f9b2e8d14a6f                        │
// │ Size: 156,821 bytes (ciphertext)                │
// │ Type: application/octet-stream                  │
// │ Data: [encrypted blob]                          │
// └─────────────────────────────────────────────────┘
// YOUR VIEW (Client):
// ┌─────────────────────────────────────────────────┐
// │ Key:  /finances/tax_2024.pdf                    │
// │ Size: 156,789 bytes (original)                  │
// │ Type: application/pdf                           │
// │ Data: [decrypted PDF document]                  │
// └─────────────────────────────────────────────────┘
use fula_client::{EncryptedClient, EncryptionConfig, Config};
// FlatNamespace provides COMPLETE structure hiding
// Inspired by WNFS and Peergos
let encryption = EncryptionConfig::new_flat_namespace();
let config = Config::new("http://localhost:9000").with_token("token");
let client = EncryptedClient::new(config, encryption)?;
// Upload - server sees: "QmX7a8f3e2d1c9b4a5e6f7d8c9a0b1e2f3"
// NO prefix, NO structure hints!
client.put_object_flat(bucket, "/photos/vacation/beach.jpg", data, None).await?;
// Server CANNOT determine:
// • Which objects are files vs folders
// • Parent/child relationships
// • How many files in each "folder"
// • Any directory structure
// List files from encrypted PrivateForest index
let files = client.list_files_from_forest(bucket).await?;
for file in files {
println!("{} - {}", file.original_key, file.size_human());
}
File Manager API
Browse encrypted files without downloading content. Perfect for file managers, sync tools, and directory browsers.
Key Benefits
- Bandwidth Efficient - Only ~1-2KB per file (headers only)
- Fast Browsing - List 1000 files without downloading 1GB
- Decrypted Names - See real filenames, not hashes
- Full Metadata - Size, type, timestamps, custom fields
Operations
- head_object_decrypted() - Single file metadata
- list_objects_decrypted() - All files with metadata
- list_directory() - Tree structure view
use fula_client::{EncryptedClient, EncryptionConfig, Config};
let client = EncryptedClient::new(config, EncryptionConfig::new())?;
// List ALL files with decrypted metadata - NO content download!
let files = client.list_objects_decrypted(bucket, None).await?;
for file in &files {
println!("📄 {} ({}) - {}",
file.filename(), // "report.pdf" (decrypted)
file.size_human(), // "1.5 MB"
file.content_type.as_deref().unwrap_or("unknown")
);
}
// Total bandwidth: ~1KB per file (just headers)
// NOT: downloading entire file content
// Get metadata WITHOUT downloading content
let metadata = client.head_object_decrypted(bucket, storage_key).await?;
println!("Original name: {}", metadata.original_key);
println!("Size: {} bytes", metadata.original_size);
println!("Type: {:?}", metadata.content_type);
println!("Created: {:?}", metadata.created_at);
println!("Modified: {:?}", metadata.modified_at);
// Only download content when user clicks "Open"
if user_wants_to_open {
let content = client.get_object_decrypted_by_storage_key(
bucket, &metadata.storage_key
).await?;
}
// Get files grouped by directory
let listing = client.list_directory(bucket, Some("/photos/")).await?;
println!("📂 {} files, {} total",
listing.file_count(),
format_size(listing.total_size())
);
for dir in listing.get_directories() {
println!("\n📁 {}/", dir);
if let Some(files) = listing.get_files(dir) {
for file in files {
let icon = match file.content_type.as_deref() {
Some(t) if t.starts_with("image/") => "🖼️",
Some(t) if t.starts_with("video/") => "🎬",
Some(t) if t.contains("pdf") => "📄",
_ => "📄",
};
println!(" {} {} ({})", icon, file.filename(), file.size_human());
}
}
}
Secure Sharing
Share files and folders with others without exposing your master key.
Features
- Path-Scoped - Share specific folders, not everything
- Time-Limited - Set expiry for temporary access
- Permissions - Read, write, delete controls
- Revocable - Cancel access anytime
- Zero Knowledge - Server can't read shared content
How It Works
- Create share token with recipient's public key
- DEK is re-encrypted for recipient (not shared directly)
- Recipient decrypts with their private key
- Your master key is never exposed
use fula_crypto::{
KekKeyPair, DekKey,
sharing::{ShareBuilder, ShareToken, AcceptedShare},
hpke::SharePermissions,
};
// Alice (owner) wants to share with Bob
let alice_keypair = KekKeyPair::generate();
let bob_keypair = KekKeyPair::generate();
let file_dek = DekKey::generate(); // File's encryption key
// Create share token for Bob (expires in 7 days)
let share_token = ShareBuilder::new(
&alice_keypair,
bob_keypair.public_key(),
&file_dek,
)
.with_path_scope("/photos/vacation/") // Only this folder
.with_expiry_days(7) // 7 days access
.with_permissions(SharePermissions::read_only())
.build()?;
// Send token to Bob (via any channel)
let token_json = serde_json::to_string(&share_token)?;
// Bob receives the share token
let share_token: ShareToken = serde_json::from_str(&token_json)?;
// Verify and accept
let accepted = AcceptedShare::accept(&share_token, &bob_keypair)?;
// Now Bob can decrypt files in /photos/vacation/
let decrypted_dek = accepted.dek();
// Check permissions
if accepted.can_write() {
// Bob can modify files
}
if accepted.is_expired() {
// Access has expired
}
use fula_crypto::sharing::FolderShareManager;
// Create share manager for a folder
let mut share_manager = FolderShareManager::new(
owner_keypair.clone(),
"/documents/project-x/".to_string(),
);
// Add multiple recipients
share_manager.add_recipient(
"alice",
alice_public_key,
SharePermissions::read_write(),
)?;
share_manager.add_recipient(
"bob",
bob_public_key,
SharePermissions::read_only(),
)?;
// Revoke access
share_manager.revoke("bob")?;
// Check who has access
for (name, _) in share_manager.list_recipients() {
println!("Shared with: {}", name);
}
Multipart Uploads
Upload large files efficiently using multipart uploads with progress tracking.
Benefits
- Resumable - Continue failed uploads
- Parallel - Upload parts concurrently
- Progress - Track upload progress
- Large files - Up to 5TB per object
Part Size
Default: 5MB per part. Minimum: 5MB (except last part).
use fula_client::multipart::upload_large_file;
use std::sync::Arc;
#[tokio::main]
async fn main() -> Result<(), Box> {
let client = Arc::new(FulaClient::new(config).await?);
// Read large file
let data = std::fs::read("large-video.mp4")?;
// Upload with progress callback
let etag = upload_large_file(
client,
"media-bucket",
"videos/large-video.mp4",
data.into(),
Some(Box::new(|progress| {
println!(
"Uploading: {:.1}% ({}/{} bytes) - Part {}/{}",
progress.percentage(),
progress.bytes_uploaded,
progress.total_bytes,
progress.current_part,
progress.total_parts
);
})),
).await?;
println!("Upload complete! ETag: {}", etag);
Ok(())
}
use fula_client::multipart::MultipartUpload;
// Start multipart upload
let mut upload = MultipartUpload::start(
client.clone(),
"bucket",
"large-file.zip"
).await?;
println!("Upload ID: {}", upload.upload_id());
// Upload parts
for (i, chunk) in data.chunks(5 * 1024 * 1024).enumerate() {
let etag = upload.upload_part((i + 1) as u32, chunk).await?;
println!("Part {} uploaded: {}", i + 1, etag);
}
// Complete upload
let final_etag = upload.complete().await?;
println!("Complete: {}", final_etag);
AWS CLI
Use the standard AWS CLI with Fula's S3-compatible endpoint.
Configuration
Configure AWS CLI to use the Fula endpoint:
Supported Commands
- aws s3 ls - List buckets/objects
- aws s3 cp - Upload/download
- aws s3 rm - Delete objects
- aws s3 mb - Create bucket
- aws s3 rb - Remove bucket
- aws s3 sync - Sync directories
# Configure credentials
aws configure set aws_access_key_id YOUR_ACCESS_KEY
aws configure set aws_secret_access_key YOUR_SECRET_KEY
# Set endpoint alias
alias fula='aws --endpoint-url http://localhost:9000'
# List all buckets
fula s3 ls
# Create bucket
fula s3 mb s3://my-bucket
# Upload file
fula s3 cp ./local-file.txt s3://my-bucket/remote-file.txt
# Upload directory
fula s3 cp ./my-folder s3://my-bucket/folder/ --recursive
# Download file
fula s3 cp s3://my-bucket/remote-file.txt ./downloaded.txt
# List objects with prefix
fula s3 ls s3://my-bucket/prefix/
# Delete object
fula s3 rm s3://my-bucket/old-file.txt
# Sync local to remote
fula s3 sync ./local-dir s3://my-bucket/backup/
# Delete bucket (must be empty)
fula s3 rb s3://my-bucket
Python (boto3)
Use the official AWS SDK for Python with Fula's S3-compatible API.
Installation
pip install boto3
Features
- Full S3 API compatibility
- Automatic retries
- Multipart upload support
- Streaming transfers
import boto3
from botocore.config import Config
# Create client
s3 = boto3.client(
's3',
endpoint_url='http://localhost:9000',
aws_access_key_id='YOUR_ACCESS_KEY',
aws_secret_access_key='YOUR_SECRET_KEY',
config=Config(signature_version='s3v4')
)
# Create bucket
s3.create_bucket(Bucket='my-bucket')
# Upload file
s3.upload_file('local-file.txt', 'my-bucket', 'remote-file.txt')
# Upload with metadata
s3.put_object(
Bucket='my-bucket',
Key='document.pdf',
Body=open('document.pdf', 'rb'),
ContentType='application/pdf',
Metadata={'author': 'John Doe'}
)
# Download file
s3.download_file('my-bucket', 'remote-file.txt', 'downloaded.txt')
# List objects
response = s3.list_objects_v2(Bucket='my-bucket', Prefix='docs/')
for obj in response.get('Contents', []):
print(f"{obj['Key']} - {obj['Size']} bytes")
# Delete object
s3.delete_object(Bucket='my-bucket', Key='old-file.txt')
# Presigned URL (for sharing)
url = s3.generate_presigned_url(
'get_object',
Params={'Bucket': 'my-bucket', 'Key': 'file.txt'},
ExpiresIn=3600 # 1 hour
)
print(f"Share URL: {url}")
JavaScript / TypeScript
Use the AWS SDK for JavaScript with Fula's S3-compatible endpoint.
Installation
npm install @aws-sdk/client-s3
Browser Support
Works in Node.js and browsers with proper CORS configuration.
import {
S3Client,
CreateBucketCommand,
PutObjectCommand,
GetObjectCommand,
ListObjectsV2Command,
DeleteObjectCommand,
} from '@aws-sdk/client-s3';
// Create client
const s3 = new S3Client({
endpoint: 'http://localhost:9000',
region: 'us-east-1',
credentials: {
accessKeyId: 'YOUR_ACCESS_KEY',
secretAccessKey: 'YOUR_SECRET_KEY',
},
forcePathStyle: true, // Required for S3-compatible services
});
async function main() {
// Walks the full object lifecycle against the Fula S3-compatible
// endpoint using the client configured above: create a bucket,
// upload, download, list, then delete.
// Create bucket
await s3.send(new CreateBucketCommand({
Bucket: 'my-bucket',
}));
// Upload object
await s3.send(new PutObjectCommand({
Bucket: 'my-bucket',
Key: 'hello.txt',
Body: 'Hello, Fula!',
ContentType: 'text/plain',
}));
// Download object
const response = await s3.send(new GetObjectCommand({
Bucket: 'my-bucket',
Key: 'hello.txt',
}));
// Body is a stream in SDK v3; transformToString() collects it.
const content = await response.Body?.transformToString();
console.log('Content:', content);
// List objects under a prefix
const list = await s3.send(new ListObjectsV2Command({
Bucket: 'my-bucket',
Prefix: 'docs/',
}));
for (const obj of list.Contents ?? []) {
console.log(`${obj.Key} - ${obj.Size} bytes`);
}
// Delete object
await s3.send(new DeleteObjectCommand({
Bucket: 'my-bucket',
Key: 'hello.txt',
}));
}
// Run and report any error on stderr.
main().catch(console.error);