How to upload predictions on videos in a model run
Open this Colab for an interactive tutorial about uploading predictions on videos in a model run.
Supported annotations
To upload predictions in Labelbox, you need to create a predictions payload. In this section, we provide this payload for every supported prediction type.
Labelbox only supports NDJSON format for the video predictions payload.
Uploading confidence scores is optional
If you do not specify a confidence score, the prediction will be treated as if it had a confidence score of 1.
Classification: Radio (single-choice)
# NDJSON payload for a single-choice (radio) classification prediction.
radio_prediction_ndjson = {
    "name": "radio_question",
    "answer": {"name": "second_radio_answer", "confidence": 0.5},
}
Nested classifications: Radio and checklist
# Radio classification with a nested radio sub-classification.
nested_radio_prediction_ndjson = {
    "name": "nested_radio_question",
    "answer": {"name": "first_radio_answer", "confidence": 0.5},
    "classifications": [
        {
            "name": "sub_radio_question",
            "answer": {"name": "first_sub_radio_answer", "confidence": 0.5},
        },
    ],
}
# Checklist classification with a nested checklist sub-classification.
nested_checklist_prediction_ndjson = {
    "name": "nested_checklist_question",
    "answer": [
        {
            "name": "first_checklist_answer",
            "confidence": 0.5,
            "classifications": [
                {
                    "name": "sub_checklist_question",
                    "answer": {
                        "name": "first_sub_checklist_answer",
                        "confidence": 0.5,
                    },
                },
            ],
        },
    ],
}
Classification: Checklist (multi-choice)
# Multi-choice (checklist) classification prediction: both options selected.
checklist_prediction_ndjson = {
    "name": "checklist_question",
    "answer": [
        {"name": answer_name, "confidence": 0.5}
        for answer_name in ("first_checklist_answer", "second_checklist_answer")
    ],
}
Bounding box
# Confidence scores are not supported for frame-specific bounding box
# annotations, so the keyframes below carry no "confidence" field.
# The same box appears on frames 1-5; build the keyframes programmatically
# instead of repeating five identical dict literals.
_BBOX = {"top": 146.0, "left": 98.0, "height": 382.0, "width": 341.0}
bbox_prediction_ndjson = {
    "name": "bbox_video",
    "segments": [
        {
            "keyframes": [
                # dict(...) gives each keyframe its own bbox dict so later
                # per-frame edits cannot mutate the shared template.
                {"frame": frame, "bbox": dict(_BBOX)}
                for frame in range(1, 6)
            ]
        }
    ]
}
Bounding box with nested frame-specific classification
# Confidence scores are not supported for the bounding box itself, but they
# can be used for nested classifications under tools.
# This combination only supports schema ids (not names) in the nested
# classification; run the demo Colab notebook linked at the top of the page
# to learn how to fetch ontology schema ids.
# The same box appears on frames 13-15; build the keyframes programmatically.
_SUBCLASS_BBOX = {"top": 146.0, "left": 98.0, "height": 382.0, "width": 341.0}
bbox_with_radio_subclass_prediction_ndjson = {
    "name": "bbox_with_radio_subclass",
    "segments": [
        {
            "keyframes": [
                {
                    "frame": frame,
                    "bbox": dict(_SUBCLASS_BBOX),
                    # Empty schemaId placeholders are filled in later by the
                    # set_feature_schema_id helper defined below.
                    "classifications": [{
                        "schemaId": "",
                        "answer": {"schemaId": "", "confidence": 0.5},
                    }],
                }
                for frame in range(13, 16)
            ]
        }
    ]
}
Point
# Confidence scores are not supported in point annotations.
point_prediction_ndjson = {
    "name": "point_video",
    "segments": [
        {
            "keyframes": [
                {"frame": 17, "point": {"x": 660.134, "y": 407.926}},
            ],
        },
    ],
}
Polyline
# Confidence scores are not supported in polyline annotations.
# Two segments: the first covers frames 5/12/20, the second frames 24/45.
# Each entry pairs a frame number with its polyline vertices as (x, y) tuples.
polyline_prediction_ndjson = {
    "name": "line_video_frame",
    "segments": [
        {
            "keyframes": [
                {"frame": frame, "line": [{"x": x, "y": y} for x, y in vertices]}
                for frame, vertices in segment
            ],
        }
        for segment in (
            (
                (5, ((680, 100), (100, 190), (190, 220))),
                (12, ((680, 280), (300, 380), (400, 460))),
                (20, ((680, 180), (100, 200), (200, 260))),
            ),
            (
                (24, ((300, 310), (330, 430))),
                (45, ((600, 810), (900, 930))),
            ),
        )
    ],
}
End-to-end example: Upload predictions to a Model Run
Follow the steps below to upload predictions to a model run.
Before you start
You will need to import these two libraries to use the code examples in this section:
import labelbox as lb
import uuid
Replace with your API key
To learn how to create an API key, please follow the instructions on this page.
# Paste your Labelbox API key here; see the page linked above for how to create one.
API_KEY = ""
# Authenticated SDK client used by every call below.
client = lb.Client(API_KEY)
Define a helper method to set the feature schema IDs
This is only required for the "Bounding box with nested frame-specific classification" prediction, since this combination does not currently support matching by `name`.
def set_feature_schema_id(features, nested_ndjson):
    """Fill in the feature schema ids on a nested bounding-box payload.

    The "bbox_with_radio_subclass" combination does not currently support
    matching nested classifications by name, so the empty "schemaId"
    placeholders in the NDJSON payload must be replaced with ids taken from
    the project's normalized ontology.

    Args:
        features: a project's normalized ontology (dict with a 'tools' list).
        nested_ndjson: the nested NDJSON annotation object; modified in place.

    Returns:
        The updated nested_ndjson (same object, for call-chaining).
    """
    for tool in features['tools']:
        if tool['name'] != "bbox_with_radio_subclass":
            continue
        # Schema id of the nested classification and of its first answer option.
        classification = tool['classifications'][0]
        class_feature_schema_id = classification['featureSchemaId']
        class_options_feature_schema_id = classification['options'][0]['featureSchemaId']
        # Stamp the ids onto every keyframe of every segment.
        for segment in nested_ndjson['segments']:
            for keyframe in segment['keyframes']:
                nested_class = keyframe['classifications'][0]
                nested_class['schemaId'] = class_feature_schema_id
                # Update the answer in place (rather than replacing the dict)
                # so optional fields such as "confidence" are preserved.
                nested_class.setdefault('answer', {})['schemaId'] = class_options_feature_schema_id
    return nested_ndjson
Step 1: Import data rows into Catalog
# Create a dataset and add one sample video data row to Catalog.
# (The variable name says "img" but the row_data URL is an .mp4 video.)
test_img_url = {
"row_data": "https://storage.googleapis.com/labelbox-datasets/video-sample-data/sample-video-2.mp4",
"global_key": str(uuid.uuid4())
}
dataset = client.create_dataset(name="video_prediction_demo")
data_row = dataset.create_data_row(test_img_url)
print(data_row)
Step 2: Create/select an ontology for your model predictions
Your project should have the correct ontology set up, with all the tools and classifications supported for your predictions, and the tool and classification names should match the `name` fields in your predictions to ensure the correct feature schemas are matched.
# Build the ontology matching every prediction payload above. Tool and
# classification names must match the "name" fields in the NDJSON payloads.
# Note: two inline comments in the original used C-style "//", which is a
# SyntaxError in Python; they are "#" comments here.
ontology_builder = lb.OntologyBuilder(
    classifications=[  # List of Classification objects
        lb.Classification(  # Radio classification "radio_question" with two options
            class_type=lb.Classification.Type.RADIO,
            name="radio_question",
            options=[
                lb.Option(value="first_radio_answer"),
                lb.Option(value="second_radio_answer")
            ]
        ),
        lb.Classification(  # Checklist classification "checklist_question"
            class_type=lb.Classification.Type.CHECKLIST,
            name="checklist_question",
            options=[
                lb.Option(value="first_checklist_answer"),
                lb.Option(value="second_checklist_answer")
            ]
        ),
        lb.Classification(  # Radio with a nested radio sub-classification
            class_type=lb.Classification.Type.RADIO,
            name="nested_radio_question",
            options=[
                lb.Option("first_radio_answer",
                          options=[
                              lb.Classification(
                                  class_type=lb.Classification.Type.RADIO,
                                  name="sub_radio_question",
                                  options=[lb.Option("first_sub_radio_answer")]
                              )
                          ])
            ]
        ),
        lb.Classification(  # Checklist with a nested checklist sub-classification
            class_type=lb.Classification.Type.CHECKLIST,
            name="nested_checklist_question",
            options=[
                lb.Option(value="first_checklist_answer",
                          options=[
                              lb.Classification(
                                  class_type=lb.Classification.Type.CHECKLIST,
                                  name="sub_checklist_question",
                                  options=[lb.Option("first_sub_checklist_answer")]
                              )
                          ])
            ]
        )
    ],
    tools=[  # List of Tool objects
        lb.Tool(  # Bounding Box tool given the name "bbox_video"
            tool=lb.Tool.Type.BBOX,
            name="bbox_video"),
        lb.Tool(  # Bounding Box tool "bbox_with_radio_subclass" with a nested radio
            tool=lb.Tool.Type.BBOX,
            name="bbox_with_radio_subclass",
            classifications=[
                lb.Classification(
                    class_type=lb.Classification.Type.RADIO,
                    name="sub_radio_question",
                    options=[
                        lb.Option(value="first_sub_radio_answer")
                    ]
                ),
            ]
        ),
        lb.Tool(  # Point tool given the name "point_video"
            tool=lb.Tool.Type.POINT,
            name="point_video"),
        lb.Tool(  # Polyline tool given the name "line_video_frame"
            tool=lb.Tool.Type.LINE,
            name="line_video_frame")]
)
# Create the ontology in Labelbox with video as the media type.
ontology = client.create_ontology("Video Prediction Import Demo",
ontology_builder.asdict(),
media_type=lb.MediaType.Video)
# Normalized ontology dict; required to run the set_feature_schema_id helper method.
features_schema = ontology.normalized
Step 3: Create a Model and model run
# Create a Model container bound to the ontology; predictions are versioned
# inside it as model runs.
model = client.create_model(name="video_model_run_" + str(uuid.uuid4()),
ontology_id=ontology.uid)
# Create the first model run (iteration) that will receive the predictions.
model_run = model.create_model_run("iteration 1")
Step 4: Send data rows to the model run
# Attach the data row created in Step 1 to the model run.
model_run.upsert_data_rows([data_row.uid])
Step 5: Create the predictions payload
Create the predictions payload using the snippets of code shown above. Labelbox only supports NDJSON payloads for importing video predictions.
# Fill in the schema ids on the nested bounding-box prediction (it cannot be
# matched by name; see the helper defined above).
set_feature_schema_id(features_schema, bbox_with_radio_subclass_prediction_ndjson)
# Point every prediction at the data row and collect them into one NDJSON list.
predictions = (
    radio_prediction_ndjson,
    checklist_prediction_ndjson,
    bbox_prediction_ndjson,
    bbox_with_radio_subclass_prediction_ndjson,
    point_prediction_ndjson,
    polyline_prediction_ndjson,
    nested_radio_prediction_ndjson,
    nested_checklist_prediction_ndjson,
)
label_ndjson = []
for pred in predictions:
    pred['dataRow'] = {'id': data_row.uid}
    label_ndjson.append(pred)
Step 6: Upload the predictions payload to the model run
# Upload the prediction label to the Model Run
upload_job_prediction = model_run.add_predictions(
name="prediction_upload_job"+str(uuid.uuid4()),
predictions=label_ndjson)
# Errors will appear for annotation uploads that failed.
# NOTE(review): the annotation import in step 7.5 calls wait_until_done()
# before reading .errors; presumably the same is needed here — confirm.
print("Errors:", upload_job_prediction.errors)
Step 7: Send annotations to the model run (optional)
# 7.1 Create a Labelbox project
# (The original batch-creation comments used C-style "//" and "///", which
# are SyntaxErrors in Python; they are "#" comments here.)
project = client.create_project(name="video_prediction_demo",
                                # Quality Settings setup
                                auto_audit_percentage=1,
                                auto_audit_number_of_labels=1,
                                media_type=lb.MediaType.Video)
project.setup_editor(ontology)
# 7.2 Create a batch to send to the project
project.create_batch(
    "batch_video_prediction_demo",  # Each batch in a project must have a unique name
    dataset.export_data_rows(),     # A list of data rows or data row ids
    5                               # priority between 1 (highest) and 5 (lowest)
)
# 7.3 create the annotations payload
# See here for more details:
# https://docs.labelbox.com/reference/import-video-annotations#supported-annotations
radio_annotation_ndjson ...
checklist_annotation_ndjson ...
bbox_annotation_ndjson ...
point_annotation_ndjson ...
polyline_annotation_ndjson ...
nested_radio_annotation_ndjson ...
nested_checklist_prediction_ndjson ...
bbox_with_radio_subclass_annotation_ndjson ...
# 7.4 Create a Label object by identifying the applicable data row in Labelbox and providing a list of annotations
# Collect the annotation payloads from 7.3, point each at the data row, and
# build one NDJSON list for Label Import.
ndjson_annotation = []
# NOTE(review): this list reuses nested_checklist_prediction_ndjson from
# Step 5 instead of a nested_checklist_annotation_ndjson payload — likely a
# typo carried over from the predictions section; confirm before running.
for annot in [
radio_annotation_ndjson,
checklist_annotation_ndjson,
bbox_annotation_ndjson,
point_annotation_ndjson,
polyline_annotation_ndjson,
nested_radio_annotation_ndjson,
nested_checklist_prediction_ndjson,
bbox_with_radio_subclass_annotation_ndjson
]:
annot.update({
'dataRow': {'id': data_row.uid},
})
ndjson_annotation.append(annot)
# 7.5 Upload annotations to the project using Label Import
upload_job_annotation = lb.LabelImport.create_from_objects(
client = client,
project_id = project.uid,
name="video_annotations_import_" + str(uuid.uuid4()),
labels=ndjson_annotation)
# Block until the import finishes so .errors below is fully populated.
upload_job_annotation.wait_until_done()
# Errors will appear for annotation uploads that failed.
print("Errors:", upload_job_annotation.errors)
# 7.6 Send the annotations to the Model Run
# Collect the label ids from the project's exported labels.
# NOTE(review): this assumes export_labels(download=True) returns dicts keyed
# by 'ID' — verify against the current SDK's export format.
label_ids = [x['ID'] for x in project.export_labels(download=True)]
model_run.upsert_labels(label_ids)
End-to-end Python tutorial
Open this Colab for an end-to-end tutorial on uploading video predictions.