Rust Integration
Secure your Axum, Actix, or Rocket applications with type-safe content moderation.
Prerequisites
1. Installation
Start by adding the SafeComms crate to your Cargo.toml file.
$ cargo add safecomms
2. Initialize Client
Next, initialize the SafeComms client with your API key. This creates a connection to our analysis engine.
main.rs
use safecomms::SafeCommsClient;
use std::env;
// Initialize the SafeComms client once at startup, reading the API key
// from the environment. `expect` aborts immediately if the variable is
// unset, which surfaces misconfiguration at boot rather than per-request.
let client = SafeCommsClient::new(
    env::var("SAFECOMMS_API_KEY").expect("SAFECOMMS_API_KEY must be set"),
    None // NOTE(review): second argument's meaning isn't shown here — presumably optional config/base URL; confirm against the crate docs
);

3. Add Handler Logic
Add the moderation logic to your route handler. This will scan the incoming request content before processing it further.
main.rs
// Axum handler for POST /api/comments: moderates the submitted text
// before accepting it. The SafeComms client arrives via router state;
// the request body is deserialized into `CreateComment`.
async fn create_comment(
    State(client): State<SafeCommsClient>,
    Json(payload): Json<CreateComment>,
) -> impl IntoResponse {
    // Check content: send the raw comment text to the moderation service.
    let result = client.moderate_text(
        &payload.content,
        Some("en"),  // language
        Some(false), // replace
        Some(false), // pii
        None,        // replace_severity
        None         // moderation_profile_id
    ).await;
    match result {
        Ok(res) => {
            // Flagged content is rejected with 400 and the full moderation
            // result as the body, so the caller can see why it was blocked.
            if !res.is_clean {
                return (StatusCode::BAD_REQUEST, Json(res)).into_response();
            }
            // Content is safe
            (StatusCode::OK, Json(json!({ "success": true }))).into_response()
        },
        // NOTE(review): the error value is discarded here; the complete
        // example later in this page logs it before responding — consider
        // doing the same in this snippet for consistency.
        Err(_) => (StatusCode::INTERNAL_SERVER_ERROR, "Moderation check failed").into_response(),
    }
}

4. Verify & Test
Finally, verify your integration is working correctly by sending a test request.
Terminal
curl -X POST http://localhost:3000/api/comments \
-H "Content-Type: application/json" \
-d '{"content": "This is some sample text with profanity"}'

Expected Output (400 Bad Request)
{
"id": "req_123abc",
"isClean": false,
"severity": "Critical",
"categoryScores": {
"profanity": 0.98,
"toxicity": 0.85
},
"reason": "Content contains profanity"
}

5. Complete Example
Here is the full code block ready to copy and paste.
main.rs
use axum::{
extract::State,
http::StatusCode,
response::IntoResponse,
routing::post,
Json, Router,
};
use safecomms::SafeCommsClient;
use serde::Deserialize;
use serde_json::json;
use std::env;
use std::net::SocketAddr;
// Request body for POST /api/comments, deserialized from the incoming JSON.
#[derive(Deserialize)]
struct CreateComment {
    content: String, // the user-submitted text that will be moderated
}
/// Application entry point: builds the SafeComms client, registers the
/// moderation-guarded comment route, and serves on 127.0.0.1:3000.
#[tokio::main]
async fn main() {
    // Initialize with your API Key. `expect` panics at startup when the
    // variable is missing — preferable to failing on the first request.
    let client = SafeCommsClient::new(
        env::var("SAFECOMMS_API_KEY").expect("SAFECOMMS_API_KEY must be set"),
        None,
    );
    // The client is stored as shared router state so every handler
    // extracts it via `State(client)`.
    let app = Router::new()
        .route("/api/comments", post(create_comment))
        .with_state(client);
    let addr = SocketAddr::from(([127, 0, 0, 1], 3000));
    println!("listening on {}", addr);
    // NOTE(review): `axum::Server` is the axum 0.6-era API; axum 0.7+
    // replaced it with `tokio::net::TcpListener` + `axum::serve`.
    // Confirm the axum version pinned in Cargo.toml matches this code.
    axum::Server::bind(&addr)
        .serve(app.into_make_service())
        .await
        .unwrap();
}
/// Handler for POST /api/comments.
///
/// Runs the comment body through SafeComms moderation before accepting
/// it: clean content yields 200 with `{"success": true}`, flagged
/// content yields 400 with the moderation result as the JSON body, and
/// a moderation-service failure yields 500.
async fn create_comment(
    State(client): State<SafeCommsClient>,
    Json(payload): Json<CreateComment>,
) -> impl IntoResponse {
    // 1. Check content
    let result = client.moderate_text(
        &payload.content,
        Some("en"),  // language
        Some(false), // replace
        Some(false), // pii
        None,        // replace_severity
        None         // moderation_profile_id
    ).await;
    match result {
        Ok(res) => {
            // 2. Act on result: reject flagged content and return the
            // full moderation payload so the client can see the reason.
            if !res.is_clean {
                return (StatusCode::BAD_REQUEST, Json(res)).into_response();
            }
            // 3. Content is safe, proceed to save...
            // db.save_comment(&payload.content).await;
            (StatusCode::OK, Json(json!({ "success": true }))).into_response()
        },
        Err(e) => {
            // NOTE(review): consider `eprintln!` or a tracing/log macro so
            // errors go to stderr / structured logs instead of stdout.
            println!("Error: {}", e);
            (StatusCode::INTERNAL_SERVER_ERROR, "Moderation check failed").into_response()
        },
    }
}

Configuration & Tuning
Need to adjust sensitivity or allow certain words? You don't need to change your code. Head to the dashboard to configure your moderation profile globally.