Skip to main content
POST /v1/run/inference
Typescript (SDK)
// Example: call the non-streaming inference endpoint (POST /v1/run/inference)
// via the TypeScript SDK. The empty object is the request body; populate it
// with the documented fields (model_passport_id, prompt, messages, ...) as needed.
import { RaijinLabsLucidAi } from "raijin-labs-lucid-ai";

// NOTE(review): constructor shown without options here — presumably credentials
// are picked up from the environment; confirm against the SDK docs.
const raijinLabsLucidAi = new RaijinLabsLucidAi();

async function run() {
  const result = await raijinLabsLucidAi.run.inference({});

  console.log(result);
}

// Handle rejection explicitly: a bare `run();` leaves a floating promise, and
// any SDK/network error would surface as an unhandled promise rejection.
run().catch((err) => {
  console.error(err);
  process.exitCode = 1;
});
{
  "success": true,
  "run_id": "<string>",
  "tokens_in": 123,
  "tokens_out": 123,
  "ttft_ms": 123,
  "total_latency_ms": 123,
  "model_passport_id": "<string>",
  "compute_passport_id": "<string>",
  "runtime": "<string>",
  "request_id": "<string>",
  "trace_id": "<string>",
  "text": "<string>",
  "finish_reason": "<string>",
  "policy_hash": "<string>",
  "receipt_id": "<string>",
  "used_fallback": true,
  "fallback_reason": "<string>",
  "error": "<string>",
  "error_code": "<string>"
}

Body

application/json
model_passport_id
string
model
string
prompt
string
messages
object[]
max_tokens
integer
temperature
number
top_p
number
top_k
integer
stop
string[]
stream
boolean
policy
object
compute_passport_id
string
trace_id
string
request_id
string

Response

Non-streaming inference result

success
boolean
required
run_id
string
required
tokens_in
integer
required
tokens_out
integer
required
ttft_ms
integer
required
total_latency_ms
integer
required
model_passport_id
string
required
compute_passport_id
string
required
runtime
string
required
request_id
string
trace_id
string
text
string
finish_reason
string
policy_hash
string
receipt_id
string
used_fallback
boolean
fallback_reason
string
error
string
error_code
string