Practical Examples
This page provides a structured overview and practical guide to working with the Platform API. It is intended for developers, data scientists, and other technical professionals who need to integrate, test, and analyze data using the SPLX Platform.
The document consolidates explanations, implementation details, and executable examples into a single reference, enabling both quick onboarding and deeper exploration of the API.
Objectives
Introduce the core concepts and capabilities of the Platform API
Provide clear examples of request and response patterns
Demonstrate typical workflows and integration strategies
Offer reproducible code samples for practical experimentation
Imports
import requests
import json
Setting variables
# Note: these variables must be set according to your own URL, otherwise the API calls will not work.
# Example URL:
# https://probe.splx.ai/w/290/target/91/test-runs/862/probe/1815?tab=results
url = "https://api.probe.splx.ai" # URL for the EU deployment; us.api.probe.splx.ai for US
workspace_id = "290"
target_id = "91"
test_run_id = "862"
probe_run_id = "1815"
# Without a valid API key, none of the API calls will work.
API_KEY = "YOUR_API_KEY"
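The examples on this page call requests directly. If you prefer a single place for authentication and HTTP error handling, a minimal wrapper could look like the sketch below (the helper name and structure are our own, not part of the API):
def get_json(path):
    # Authenticated GET against the Platform API; fails loudly on 4xx/5xx responses
    response = requests.get(
        f"{url}{path}",
        headers={"X-Api-Key": API_KEY, "Accept": "*/*"},
    )
    response.raise_for_status()
    return response.json()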
Example: Export probe run test cases
response = requests.post(
f"{url}/api/workspaces/{workspace_id}/probe-run/{probe_run_id}/test-cases/export",
headers={"X-Api-Key":API_KEY, "Content-Type":"application/json-patch+json"},
    data=json.dumps({
        # Only the `result` filter is set; the remaining filters are left unset (None)
        "filters": {
"redTeam": None,
"result": ["FAILED"],
"search": None,
"strategy": None,
"variation": None
},
"format": "json"
})
)
response.json()
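To keep a local copy of the export, you can write the parsed response to disk (the filename here is just an example):
export = response.json()
with open("probe_run_failed_export.json", "w") as f:
    json.dump(export, f, indent=2)  # pretty-printed for easier inspection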
Endpoints for Replicating PDF Reports
The following endpoints are used to export the findings shown in the PDF reports downloadable from the Platform:
Get test run status
Retrieve probe run execution data and analysis results
Retrieve overall scores and category breakdown for a target
The combined results of these three calls provide the status, detailed findings, and summary metrics required to replicate the SPLX Platform PDF reports.
# Get test run status
response = requests.get(
    f"{url}/api/workspaces/{workspace_id}/test-run/{test_run_id}/status",
    headers={"X-Api-Key": API_KEY, "Accept": "*/*"},
)
test_run_data = response.json()

# Retrieve probe run execution data and analysis results
response = requests.get(
    f"{url}/api/workspaces/{workspace_id}/probe-run/{probe_run_id}",
    headers={"X-Api-Key": API_KEY, "Accept": "*/*"},
)
probe_run_data = response.json()

# Retrieve overall scores and category breakdown for the target
response = requests.get(
    f"{url}/api/workspaces/{workspace_id}/target/{target_id}/scores",
    headers={"X-Api-Key": API_KEY, "Accept": "*/*"},
)
score_data = response.json()
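One way to stitch the three responses together into a single report-like object and save it for later use (the top-level keys and the filename are our own choices, not a Platform format):
report = {
    "status": test_run_data,    # test run status, including its probe runs
    "findings": probe_run_data, # detailed probe run results and analysis
    "scores": score_data,       # overall scores and category breakdown
}
with open("platform_report.json", "w") as f:
    json.dump(report, f, indent=2)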
Endpoints Used for Probe Settings
Get probe settings for a target
response = requests.get(
f"{url}/api/workspaces/{workspace_id}/target/{target_id}/probe-settings",
headers={"X-Api-Key":API_KEY,"Accept":"*/*"},
)
probe_settings_data = response.json()
probe_settings_data
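In a notebook, the bare variable on the last line renders the raw payload; for easier reading, pretty-print it:
print(json.dumps(probe_settings_data, indent=2))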
Endpoints Used for Remediation Tasks & Guardrail Policy Suggestions
Remediation tasks & guardrail policy suggestions for specific probe
response = requests.get(
f"{url}/api/workspaces/{workspace_id}/probe-run/{probe_run_id}",
headers={"X-Api-Key":API_KEY,"Accept":"*/*"},
)
data = response.json()
data
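The probe run payload contains the remediation tasks and guardrail policy suggestions. The key names used below are assumptions about the payload shape, not confirmed field names; inspect data and adjust them to match your actual response:
# NOTE: key names are assumptions; verify them against your own payload
remediation_tasks = data.get("remediationTasks", [])
guardrail_suggestions = data.get("guardrailPolicySuggestions", [])
print(len(remediation_tasks), "remediation tasks /", len(guardrail_suggestions), "guardrail policy suggestions")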
Example Use Case 1: Get all FAILED Test Cases for a Given Organization
1. Get all workspaces
response = requests.get(
f"{url}/api/workspace",
headers={"X-Api-Key":API_KEY,"Accept":"*/*"},
)
workspace_data = response.json()
workspace_data
2. Retrieve all test runs for every Target ID within a workspace
test_run_data = {}
for workspace in workspace_data:
if workspace['id'] not in test_run_data:
test_run_data[workspace['id']] = []
for target in workspace['targets']:
response = requests.get(
f"{url}/api/workspaces/{workspace['id']}/target/{target['id']}/test-runs",
headers={"X-Api-Key":API_KEY,"Accept":"*/*"},
)
        # Keep results keyed by workspace ID: fetching test cases later requires it together with the probe run ID
        test_run_data[workspace['id']].extend(response.json())
test_run_data
3. For each test run, retrieve all associated Probe Run IDs
probe_run_data = {}
for workspace_id in test_run_data:
if workspace_id not in probe_run_data:
probe_run_data[workspace_id] = []
for test_run in test_run_data[workspace_id]:
if test_run['status'] == 'FINISHED':
            response = requests.get(
                f"{url}/api/workspaces/{workspace_id}/test-run/{test_run['id']}/status",
                headers={"X-Api-Key": API_KEY, "Accept": "*/*"},
            )
            status_data = response.json()  # parse the response once instead of re-parsing it on every access
            if "probeRuns" in status_data:
                for probe_run in status_data['probeRuns']:
                    probe_run_data[workspace_id].append(probe_run)
            else:
                print("POSSIBLE ERROR:", status_data)
probe_run_data
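A quick sanity check before fetching test cases: count how many probe runs were collected per workspace.
# Number of probe runs collected per workspace
{ws: len(runs) for ws, runs in probe_run_data.items()}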
4. For each probe run, retrieve all failed test cases and consolidate them into a single dataset
failed_test_cases = []
for workspace_id in probe_run_data:
for probe_run in probe_run_data[workspace_id]:
response = requests.post(
f"{url}/api/workspaces/{workspace_id}/probe-run/{probe_run['probeRunId']}/test-cases/export",
headers={"X-Api-Key":API_KEY, "Content-Type":"application/json-patch+json"},
data=json.dumps({
"filters": {
"result": ["FAILED"]
},
"format": "json"
})
)
        # Combine all exports into one list; here we do not track which workspace each test case came from
        failed_test_cases.append(response.json())
failed_test_cases
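Each element of failed_test_cases is one export payload. To persist the consolidated dataset for offline analysis (the filename is just an example):
with open("all_failed_test_cases.json", "w") as f:
    json.dump(failed_test_cases, f, indent=2)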
Example Use Case 2: Retrieving FAILED Test Cases for a Specific Benchmark Model and Probe Run
1. Retrieve all benchmark models and benchmark types to view their IDs and details
response = requests.get(
f"{url}/api/v2/benchmarks/models",
headers={"X-Api-Key":API_KEY,"Accept":"*/*"},
)
all_benchmarks_data = response.json()
all_benchmarks_data
response = requests.get(
f"{url}/api/v2/benchmarks/types",
headers={"X-Api-Key":API_KEY,"Accept":"*/*"},
)
benchmark_types = response.json()
benchmark_types
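Rather than hardcoding IDs from the listings above, you can search them by name. The payload shape here is an assumption (a list of objects with "name" and "id" fields), so verify it against your own response before relying on this sketch:
# NOTE: assumes each entry is a dict with "name" and "id" fields; adjust to your payload
def find_id_by_name(items, needle):
    # Return the ID of the first item whose name contains `needle`, or None
    for item in items:
        if isinstance(item, dict) and needle.lower() in str(item.get("name", "")).lower():
            return item.get("id")
    return None

find_id_by_name(all_benchmarks_data, "4o")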
2. Retrieve specific benchmark test cases
Example: Retrieve data for OpenAI GPT-4o without a system prompt
model_id = 15       # taken from the benchmark models listing above
benchmark_type = 1  # taken from the benchmark types listing above
response = requests.get(
f"{url}/api/v2/benchmarks/models/{model_id}/runs?benchmarkTypeId={benchmark_type}",
headers={"X-Api-Key":API_KEY,"Accept":"*/*"},
)
model_data = response.json()
model_data
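Step 3 below hardcodes the probe run ID for the Context Leakage probe; in practice you can look it up in model_data. Assuming the run entries expose name and ID fields (the same payload-shape assumption as above), the helper from the previous step applies:
# NOTE: assumes model_data is a list of run objects with "name"/"id" fields
find_id_by_name(model_data, "Context Leakage")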
3. Retrieve data for a specific probe run
Example: Get the Context Leakage probe run
probe_run_id = 2620
response = requests.get(
f"{url}/api/v2/benchmarks/models/{model_id}/runs/{probe_run_id}/test-cases",
headers={"X-Api-Key":API_KEY,"Accept":"*/*"},
)
cl_probe_data = response.json()
cl_probe_data
4. Retrieve all failed test cases for this probe run
cl_failed_data = []
for conversation in cl_probe_data["results"]:
if conversation["status"] == "FAILED":
cl_failed_data.append(conversation)
cl_failed_data
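As a quick summary, compare the number of failed conversations against the total returned for the probe run:
print(f"{len(cl_failed_data)} of {len(cl_probe_data['results'])} test cases FAILED")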