Getting Started with Managed Inference
All models listed in the overview can be queried through the api.crusoe.ai endpoint.
Retrieving your Inference API token
You can retrieve your Inference API token from the Crusoe Cloud console by following the steps below; a quick way to verify the new key follows the list.
- Visit the [Crusoe Cloud console](https://console.crusoecloud.com/)
- Click the "Security" tab in the left nav
- Select the "Inference API Key" tab in the top bar
- Click the "Create Inference API Key" button on the page
- Optionally provide an alias or an expiration date
- Click the "Create" button to view and save your API key
Querying Text models
After retrieving an API key from the Crusoe Cloud console, you can use the OpenAI Python SDK, pointed at the Crusoe base URL, to make requests. The example below uses the meta-llama/Llama-3.3-70B-Instruct model.
```python
import os

from openai import OpenAI

CRUSOE_API_KEY = os.getenv("CRUSOE_API_KEY")

client = OpenAI(
    api_key=CRUSOE_API_KEY,
    base_url="https://api.crusoe.ai/v1",
)

completion = client.chat.completions.create(
    model="meta-llama/Llama-3.3-70B-Instruct",
    messages=[
        {"role": "system", "content": "You are a helpful, concise assistant."},
        {"role": "user", "content": "Who is Robinson Crusoe?"},
    ],
)

print(completion.choices[0].message.content)
```
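For interactive use cases, the same client can stream tokens as they are generated, assuming the deployment supports the OpenAI streaming protocol (an assumption; consult the model overview if a model does not stream).

```python
# Streaming variant (assumes streaming is enabled for the model): tokens are
# printed as they arrive rather than after the full completion is ready.
stream = client.chat.completions.create(
    model="meta-llama/Llama-3.3-70B-Instruct",
    messages=[{"role": "user", "content": "Who is Robinson Crusoe?"}],
    stream=True,
)
for chunk in stream:
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
print()
```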
Interacting with Video to Video models (Batch)
Submitting a request to the Decart Mirage - Batch model involves four main steps:
- Upload the input video via the files API
- Submit the request to the queue API
- Monitor the status of the job via the queue API
- Download the output video via the files API
The code snippets below walk through each step, with equivalent examples in Python, TypeScript, and cURL.
Python:

```python
import time

import requests

INPUT_VIDEO_PATH = "<INPUT_VIDEO_PATH>"
OUTPUT_VIDEO_PATH = "<OUTPUT_VIDEO_PATH>"
AUTH_TOKEN = "<YOUR_API_KEY>"
PROJECT_ID = "<YOUR_PROJECT_ID>"

# Step 1: Upload the video file.
with open(INPUT_VIDEO_PATH, "rb") as f:
    upload_response = requests.post(
        "https://api-video.crusoe.ai/v1/files",
        headers={
            "Authorization": f"Bearer {AUTH_TOKEN}",
            "Crusoe-Project-Id": PROJECT_ID,
        },
        files={"file": f},
        data={"purpose": "video"},
    )
upload_response.raise_for_status()

upload_data = upload_response.json()
file_id = upload_data.get("id")
if not file_id:
    raise ValueError("Could not find 'id' in upload response.")
print(f"File uploaded successfully. id: {file_id}")

# Step 2: Submit the inference request.
inference_response = requests.post(
    "https://api-video.crusoe.ai/v1/queue/decart/miragelsd-1-batch/enhanced",
    headers={
        "Authorization": f"Bearer {AUTH_TOKEN}",
        "Crusoe-Project-Id": PROJECT_ID,
        "Content-Type": "application/json",
    },
    json={
        "file_id": file_id,
        "prompt": "<YOUR_PROMPT_DESCRIBING_THE_DESIRED_VIDEO>",
    },
)
inference_response.raise_for_status()

inference_data = inference_response.json()
request_id = inference_data.get("request_id")
status_url = inference_data.get("status_url")
if not request_id or not status_url:
    raise ValueError("Could not find 'request_id' or 'status_url' in enqueue response.")
print(f"Request enqueued. request_id: {request_id}")

# Step 3: Poll for job completion.
is_complete = False
result_file_id = None
while not is_complete:
    status_response = requests.get(
        status_url,
        headers={
            "Authorization": f"Bearer {AUTH_TOKEN}",
            "Crusoe-Project-Id": PROJECT_ID,
        },
    )
    status_response.raise_for_status()

    status_data = status_response.json()
    status = status_data["status"]
    if status == 200:
        is_complete = True
        result_file_id = status_data["result_file_id"]
    else:
        time.sleep(2)
print(f"Request complete. result_file_id: {result_file_id}")

# Step 4: Download the result.
download_response = requests.get(
    f"https://api-video.crusoe.ai/v1/files/{result_file_id}",
    headers={
        "Authorization": f"Bearer {AUTH_TOKEN}",
        "Crusoe-Project-Id": PROJECT_ID,
    },
    stream=True,
)
download_response.raise_for_status()

with open(OUTPUT_VIDEO_PATH, "wb") as f:
    for chunk in download_response.iter_content(chunk_size=8192):
        f.write(chunk)
print(f"File downloaded successfully to: {OUTPUT_VIDEO_PATH}")
```
TypeScript:

```typescript
// Requires Node 18+ for the global fetch, FormData, and Blob APIs.
import * as fs from "fs";

const INPUT_VIDEO_PATH = "<INPUT_VIDEO_PATH>";
const OUTPUT_VIDEO_PATH = "<OUTPUT_VIDEO_PATH>";
const AUTH_TOKEN = "<YOUR_API_KEY>";
const PROJECT_ID = "<YOUR_PROJECT_ID>";

// Step 1: Upload the video file.
const formData = new FormData();
formData.append("purpose", "video");
const videoBuffer = fs.readFileSync(INPUT_VIDEO_PATH);
formData.append("file", new Blob([videoBuffer], { type: "video/mp4" }));

const uploadResponse = await fetch("https://api-video.crusoe.ai/v1/files", {
  method: "POST",
  headers: {
    Authorization: `Bearer ${AUTH_TOKEN}`,
    "Crusoe-Project-Id": PROJECT_ID,
  },
  body: formData,
});
if (!uploadResponse.ok) {
  throw new Error(`Upload failed with status ${uploadResponse.status}`);
}

const uploadData = await uploadResponse.json();
const fileId = uploadData.id;
if (!fileId) {
  throw new Error("Could not find 'id' in upload response.");
}
console.log(`File uploaded successfully. id: ${fileId}`);

// Step 2: Submit the inference request.
const inferenceResponse = await fetch(
  "https://api-video.crusoe.ai/v1/queue/decart/miragelsd-1-batch/enhanced",
  {
    method: "POST",
    headers: {
      Authorization: `Bearer ${AUTH_TOKEN}`,
      "Crusoe-Project-Id": PROJECT_ID,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      file_id: fileId,
      prompt: "<YOUR_PROMPT_DESCRIBING_THE_DESIRED_VIDEO>",
    }),
  }
);
if (!inferenceResponse.ok) {
  throw new Error(
    `Inference request failed with status ${inferenceResponse.status}`
  );
}

const inferenceData = await inferenceResponse.json();
const requestId = inferenceData.request_id;
const statusUrl = inferenceData.status_url;
if (!requestId || !statusUrl) {
  throw new Error(
    "Could not find 'request_id' or 'status_url' in enqueue response."
  );
}
console.log(`Request enqueued. request_id: ${requestId}`);

// Step 3: Poll for job completion.
let isComplete = false;
let resultFileId: string | null = null;
while (!isComplete) {
  const statusResponse = await fetch(statusUrl, {
    method: "GET",
    headers: {
      Authorization: `Bearer ${AUTH_TOKEN}`,
      "Crusoe-Project-Id": PROJECT_ID,
    },
  });
  if (!statusResponse.ok) {
    throw new Error(`Failed to get status. status: ${statusResponse.status}`);
  }

  const statusData = await statusResponse.json();
  const status = statusData.status;
  if (status === 200) {
    isComplete = true;
    resultFileId = statusData.result_file_id;
  } else {
    await new Promise((resolve) => setTimeout(resolve, 2000));
  }
}
console.log(`Request complete. result_file_id: ${resultFileId}`);

// Step 4: Download the result.
const downloadResponse = await fetch(
  `https://api-video.crusoe.ai/v1/files/${resultFileId}`,
  {
    method: "GET",
    headers: {
      Authorization: `Bearer ${AUTH_TOKEN}`,
      "Crusoe-Project-Id": PROJECT_ID,
    },
  }
);
if (!downloadResponse.ok) {
  throw new Error(`Download failed with status ${downloadResponse.status}`);
}

const arrayBuffer = await downloadResponse.arrayBuffer();
fs.writeFileSync(OUTPUT_VIDEO_PATH, Buffer.from(arrayBuffer));
console.log(`File downloaded successfully to: ${OUTPUT_VIDEO_PATH}`);
```
cURL:

```bash
# Step 1: Upload the video file.
curl -X POST https://api-video.crusoe.ai/v1/files \
  -H 'Authorization: Bearer <YOUR_API_KEY>' \
  -H 'Crusoe-Project-Id: <PROJECT_ID>' \
  -F "purpose=video" \
  -F "file=@/path/to/your/video.mp4"

# Step 2: Submit the inference request.
curl -X POST https://api-video.crusoe.ai/v1/queue/decart/miragelsd-1-batch/enhanced \
  -H 'Authorization: Bearer <YOUR_API_KEY>' \
  -H 'Crusoe-Project-Id: <PROJECT_ID>' \
  -H 'Content-Type: application/json' \
  -d '{
    "file_id": "<FILE_ID_FROM_STEP_1>",
    "prompt": "<YOUR_PROMPT_DESCRIBING_THE_DESIRED_VIDEO>"
  }'

# Step 3: Get the job status. Repeat this call until the response reports
# completion; the completed response includes the result file id used in Step 4.
curl -X GET https://api-video.crusoe.ai/v1/queue/decart/miragelsd-1-batch/requests/<REQUEST_ID_FROM_STEP_2> \
  -H 'Authorization: Bearer <YOUR_API_KEY>' \
  -H 'Crusoe-Project-Id: <PROJECT_ID>'

# Step 4: Download the result.
curl -X GET https://api-video.crusoe.ai/v1/files/<FILE_ID_FROM_STEP_3> \
  -H 'Authorization: Bearer <YOUR_API_KEY>' \
  -H 'Crusoe-Project-Id: <PROJECT_ID>' \
  -o ./output.mp4
```
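The Python and TypeScript examples above poll the queue API in an unbounded loop with a fixed two-second sleep. For longer jobs you may want to put a ceiling on the wait. The sketch below is a minimal, illustrative Python helper (the function name and timeout values are not part of the API) that reuses the status_url and headers from the Python example and assumes the status payload keeps the same shape, i.e. a status field that reads 200 on completion and a result_file_id.

```python
import time

import requests


def wait_for_result(status_url: str, headers: dict, timeout_s: float = 600.0, poll_s: float = 2.0) -> str:
    """Poll the queue API until the job completes or the deadline passes."""
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        response = requests.get(status_url, headers=headers)
        response.raise_for_status()
        payload = response.json()
        if payload.get("status") == 200:  # completion signal used in the examples above
            return payload["result_file_id"]
        time.sleep(poll_s)
    raise TimeoutError(f"Job did not complete within {timeout_s} seconds.")
```

Called with the same headers dictionary used in Step 3, it returns the result_file_id to pass to the download request in Step 4.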