Upload video predictions

How to upload predictions on video data to a model run, with sample upload formats.

Open this Colab for an interactive tutorial on uploading predictions and annotations on video in a model run.

Supported annotations

To upload predictions in Labelbox, you need to create a predictions payload. This section provides that payload for every supported prediction type, in both Python annotation types and NDJSON formats.

📘

Uploading confidence scores is optional

If you do not specify a confidence score, the prediction will be treated as if it had a confidence score of 1. Note that confidence scores are not supported for VideoObjectAnnotation objects.

However, nested ClassificationAnnotation objects inside a VideoObjectAnnotation can still have a confidence score.
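
For example, a VideoObjectAnnotation can carry a confidence score on its nested classification answer even though the object annotation itself takes none. A minimal sketch, assuming `import labelbox.types as lb_types` and feature names (`bbox_video`, `checklist_class`) that exist in your ontology:

import labelbox.types as lb_types

bbox_with_confident_subclass = lb_types.VideoObjectAnnotation(
    name="bbox_video",  # no confidence on the object annotation itself
    keyframe=True,
    frame=10,
    segment_index=0,
    value=lb_types.Rectangle(
        start=lb_types.Point(x=100, y=100),
        end=lb_types.Point(x=200, y=200),
    ),
    classifications=[
        lb_types.ClassificationAnnotation(
            name="checklist_class",
            value=lb_types.Checklist(answer=[
                lb_types.ClassificationAnswer(
                    name="first_checklist_answer",
                    confidence=0.5,  # confidence is allowed on the nested answer
                )
            ]),
        )
    ],
)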

Bounding box

# bbox dimensions 
bbox_dm = {
  "top":617,
  "left":1371,
  "height":419,
  "width":505
}

bbox_prediction = [
  lb_types.VideoObjectAnnotation(
    name = "bbox_video",  
    keyframe=True,
    frame=13,
    segment_index=0,
    value = lb_types.Rectangle(
          start=lb_types.Point(x=bbox_dm["left"], y=bbox_dm["top"]), # x = left, y = top 
          end=lb_types.Point(x=bbox_dm["left"] + bbox_dm["width"], y=bbox_dm["top"] + bbox_dm["height"]), # x= left + width , y = top + height
      )
  ),
  lb_types.VideoObjectAnnotation(
    name = "bbox_video",  
    keyframe=True,
    frame=15,
    segment_index=0,
    value = lb_types.Rectangle(
          start=lb_types.Point(x=bbox_dm["left"], y=bbox_dm["top"]),
          end=lb_types.Point(x=bbox_dm["left"] + bbox_dm["width"], y=bbox_dm["top"] + bbox_dm["height"]),
      )
  ),
  lb_types.VideoObjectAnnotation(
    name = "bbox_video",  
    keyframe=True,
    frame=19,
    segment_index=0,
    value = lb_types.Rectangle(
          start=lb_types.Point(x=bbox_dm["left"], y=bbox_dm["top"]), 
          end=lb_types.Point(x=bbox_dm["left"] + bbox_dm["width"], y=bbox_dm["top"] + bbox_dm["height"]),
      )
  )
]

bbox_prediction_ndjson = {
    "name" : "bbox_video",
    "segments" : [{
        "keyframes" : [
            {
              "frame": 13,
              "bbox" : bbox_dm 
           },
           {
              "frame": 15,
              "bbox" : bbox_dm 
           },
           {
              "frame": 19,
              "bbox" : bbox_dm 
           }
        ]
      }
    ]
}

Point

point_prediction = [
    lb_types.VideoObjectAnnotation(
        name = "point_video",
        keyframe=True,
        frame=17,
        value = lb_types.Point(x=660.134, y=407.926),
        )
]

point_prediction_ndjson = {
    "name": "point_video", 
    "confidence": 0.5,
    "segments": [{
        "keyframes": [{
            "frame": 17,
            "point" : {
                "x": 660.134 ,
                "y": 407.926
            }
        }]
    }] 
}

Polyline


polyline_prediction = [
  lb_types.VideoObjectAnnotation(
    name = "line_video_frame",
    keyframe=True, 
    frame=5,
    segment_index=0,
    value=lb_types.Line( 
          points=[lb_types.Point(x=680, y=100), lb_types.Point(x=100, y=190)]
      )
  ),
  lb_types.VideoObjectAnnotation(
    name = "line_video_frame",
    keyframe=True, 
    frame=12,
    segment_index=0,
    value=lb_types.Line( 
          points=[lb_types.Point(x=680, y=100), lb_types.Point(x=100, y=190)]
      )
  ),
  lb_types.VideoObjectAnnotation(
    name = "line_video_frame",
    keyframe=True, 
    frame=20,
    segment_index=0,
    value=lb_types.Line( 
          points=[lb_types.Point(x=680, y=100), lb_types.Point(x=100, y=190)]
      )
  ),
  lb_types.VideoObjectAnnotation(
    name = "line_video_frame",
    keyframe=True, 
    frame=24,
    segment_index=1,
    value=lb_types.Line( 
          points=[lb_types.Point(x=680, y=100), lb_types.Point(x=100, y=190)]
      )
  ),
  lb_types.VideoObjectAnnotation(
    name = "line_video_frame",
    keyframe=True, 
    frame=45,
    segment_index=1,
    value=lb_types.Line( 
          points=[lb_types.Point(x=680, y=100), lb_types.Point(x=100, y=190)]
      )
  )
  
]
polyline_prediction_ndjson = {
  "name": "line_video_frame", 
  "segments": [
      {
        "keyframes": [
          {
            "frame": 5,
            "line": [{
              "x": 680,
              "y": 100
            },{
              "x": 100,
              "y": 190
            },{
              "x": 190,
              "y": 220
            }]
          },
          {
            "frame": 12,
            "line": [{
              "x": 680,
              "y": 280
            },{
              "x": 300,
              "y": 380
            },{
              "x": 400,
              "y": 460
            }]
          },
          {
            "frame": 20,
            "line": [{
              "x": 680,
              "y": 180
            },{
              "x": 100,
              "y": 200
            },{
              "x": 200,
              "y": 260
            }]
          }
        ]
      },
      {
        "keyframes": [
          {
            "frame": 24,
            "line": [{
              "x": 300,
              "y": 310
            },{
              "x": 330,
              "y": 430
            }]
          },
          {
            "frame": 45,
            "line": [{
              "x": 600,
              "y": 810
            },{
              "x": 900,
              "y": 930
            }]
          }
        ]
      }
    ]
}

Classification: Radio (global)

global_radio_prediction = [lb_types.ClassificationAnnotation(
    name="radio_class_global",
    value=lb_types.Radio(answer = lb_types.ClassificationAnswer(
      name = "first_radio_answer",
      confidence=0.5 
      ))
)]

global_radio_classification_ndjson = {
    "name": "radio_class_global", 
    "answer": { "name": "first_radio_answer", "confidence": 0.5}
}

Classification: Radio (frame-based)

radio_prediction = [
    lb_types.VideoClassificationAnnotation(
        name="radio_class", 
        frame=9,
        segment_index=0,
        value=lb_types.Radio(answer = lb_types.ClassificationAnswer(
          name = "first_radio_answer",
          confidence=0.5 
          ))
    ),
    lb_types.VideoClassificationAnnotation(
        name="radio_class", 
        frame=15,
        segment_index=0,
        value=lb_types.Radio(answer = lb_types.ClassificationAnswer(
          name = "first_radio_answer",
          confidence=0.5 
          ))
    )
]
frame_radio_classification_prediction_ndjson = {
    "name": "radio_class", 
    "answer": { "name": "first_radio_answer", "frames": [{"start": 9, "end": 15}]}
}

Classification: Checklist (global)

global_checklist_prediction=[lb_types.ClassificationAnnotation(
  name="checklist_class_global", 
  value=lb_types.Checklist(
      answer = [
        lb_types.ClassificationAnswer(
            name = "first_checklist_answer",
            confidence=0.5 
        ), 
        lb_types.ClassificationAnswer(
            name = "second_checklist_answer",
            confidence=0.5 
        )
      ]
    )
 )]
global_checklist_classification_ndjson = {
    "name": "checklist_class_global", 
    "answer": [
        { "name": "first_checklist_answer" , "confidence": 0.5},
        { "name": "second_checklist_answer", "confidence": 0.5} 
  ]      
}

Classification: Checklist (frame-based)


checklist_prediction= [
    lb_types.VideoClassificationAnnotation(
        name="checklist_class",
        frame=29,
        segment_index=0,
        value=lb_types.Checklist(
            answer = [
                lb_types.ClassificationAnswer(
                    name = "first_checklist_answer",
                    confidence=0.5 
                )
            ]
            )
        ),
    lb_types.VideoClassificationAnnotation(
        name="checklist_class", 
        frame=35,
        segment_index=0,
        value=lb_types.Checklist(
            answer = [
                lb_types.ClassificationAnswer(
                    name = "first_checklist_answer",
                    confidence=0.5 
                )
            ]
            )
        ),
    lb_types.VideoClassificationAnnotation(
        name="checklist_class", 
        frame=39, 
        segment_index=1,
        value=lb_types.Checklist(
            answer = [
                lb_types.ClassificationAnswer(
                    name = "second_checklist_answer",
                    confidence=0.5 
                )
            ]
            )
        ),
    lb_types.VideoClassificationAnnotation(
        name="checklist_class", 
        frame=45, 
        segment_index=1,
        value=lb_types.Checklist(
            answer = [
                
                lb_types.ClassificationAnswer(
                    name = "second_checklist_answer",
                    confidence=0.5 
                )
            ]
            )
        )
]

frame_checklist_classification_prediction_ndjson = {
    "name": "checklist_class", 
    "answer": [
        { "name": "first_checklist_answer" , "frames": [{"start": 29, "end": 35 }]},
        { "name": "second_checklist_answer", "frames": [{"start": 39, "end": 45 }]} 
  ]      
}

Classification: Nested radio (global)

nested_radio_prediction =[lb_types.ClassificationAnnotation(
  name="nested_radio_question",
  value=lb_types.Radio(
    answer=lb_types.ClassificationAnswer(
      name="first_radio_answer",
      confidence=0.5 ,
      classifications=[
        lb_types.ClassificationAnnotation(
          name="sub_radio_question",
          value=lb_types.Radio(
            answer=lb_types.ClassificationAnswer(
              name="first_sub_radio_answer",
              confidence=0.5 
            )
          )
        )
      ]
    )
  )
)]
nested_radio_prediction_ndjson = {
  "name": "nested_radio_question",
  "answer": {"name": "first_radio_answer", "confidence": 0.5,
  "classifications" : [
    {"name": "sub_radio_question", "answer": {"name": "first_sub_radio_answer", "confidence": 0.5}}
   ]
  }
}

Classification: Nested checklist (global)

nested_checklist_prediction = [lb_types.ClassificationAnnotation(
  name="nested_checklist_question",
  value=lb_types.Checklist(
    answer=[lb_types.ClassificationAnswer(
      name="first_checklist_answer",
      confidence=0.5 ,
      classifications=[
        lb_types.ClassificationAnnotation(
          name="sub_checklist_question",
          value=lb_types.Checklist(
            answer=[lb_types.ClassificationAnswer(
            name="first_sub_checklist_answer",
            confidence=0.5 
          )]
        ))
      ]
    )]
  )
)]
nested_checklist_prediction_ndjson = {
  "name": "nested_checklist_question",
  "answer": [{
      "name": "first_checklist_answer",
      "confidence": 0.5,
      "classifications" : [
        {
          "name": "sub_checklist_question",
          "answer": {"name": "first_sub_checklist_answer", "confidence": 0.5}
        }
      ]
  }]
}

Bounding box with sub-classifications (frame-based)

bbox_dm2 = {
  "top": 146.0,
  "left": 98.0,
  "height": 382.0,
  "width": 341.0
}

frame_bbox_with_checklist_subclass_prediction = [
  lb_types.VideoObjectAnnotation(
    name = "bbox_class",  
    keyframe=True,
    frame=10,
    segment_index=0,
    value = lb_types.Rectangle(
          start=lb_types.Point(x=bbox_dm2["left"], y=bbox_dm2["top"]), # x = left, y = top 
          end=lb_types.Point(x=bbox_dm2["left"] + bbox_dm2["width"], y=bbox_dm2["top"] + bbox_dm2["height"]), # x= left + width , y = top + height
      )
  ),
  lb_types.VideoObjectAnnotation(
    name = "bbox_class",  
    keyframe=True,
    frame=11,
    segment_index=0,
    value = lb_types.Rectangle(
          start=lb_types.Point(x=bbox_dm2["left"], y=bbox_dm2["top"]), 
          end=lb_types.Point(x=bbox_dm2["left"] + bbox_dm2["width"], y=bbox_dm2["top"] + bbox_dm2["height"]),
      ),
    classifications=[
                lb_types.ClassificationAnnotation(
                    name='checklist_class',
                    value=lb_types.Checklist(answer=[lb_types.ClassificationAnswer(
                        name="first_checklist_answer",
                        confidence=0.5 
                    )])
                )
            ]
  ),
  lb_types.VideoObjectAnnotation(
    name = "bbox_class",  
    keyframe=True,
    frame=13,
    segment_index=0,
    value = lb_types.Rectangle(
          start=lb_types.Point(x=bbox_dm2["left"], y=bbox_dm2["top"]), 
          end=lb_types.Point(x=bbox_dm2["left"] + bbox_dm2["width"], y=bbox_dm2["top"] + bbox_dm2["height"]),
      ),
    classifications=[
              lb_types.ClassificationAnnotation(
                  name='checklist_class',
                  value=lb_types.Checklist(answer=[lb_types.ClassificationAnswer(
                      name="second_checklist_answer",
                      confidence=0.5 
                  )])
              )
          ]
  )
]
frame_bbox_with_checklist_subclass_prediction_ndjson = {
    "name": "bbox_class",
    "segments": [{
        "keyframes": [
          {
            "frame": 10,
            "bbox": bbox_dm2
          },
          {
            "frame": 11,
            "bbox": bbox_dm2,
            "classifications": [
              {
                "name": "checklist_class",
                "answer": [{"name": "first_checklist_answer", "confidence": 0.5}]
              }
            ]
          },
          {
            "frame": 13,
            "bbox": bbox_dm2,
            "classifications": [
              {
                "name": "checklist_class",
                "answer": [{"name": "second_checklist_answer", "confidence": 0.5}]
              }
            ]
          }
        ]
      }
    ]
}

Text

text_prediction = [lb_types.ClassificationAnnotation(
  name="free_text",  # must match your ontology feature's name
  value=lb_types.Text(answer="sample text", confidence=0.5)
)]
text_prediction_ndjson = {
  "name": "free_text",
  "confidence": 0.5,
  "answer": "sample text"
}

Masks

Raster segmentation masks are not yet supported in the Model product.

End-to-end example: Upload predictions to a Model Run

Follow the steps below to upload predictions to a model run.

Before you start

You will need to import these libraries to use the code examples in this section:

import labelbox as lb
import labelbox.types as lb_types
import uuid

Replace with your API key

To learn how to create an API key, please follow the instructions on this page.

API_KEY = ""
client = lb.Client(API_KEY)

Step 1: Import data rows into Catalog

global_key = "sample-video-2.mp4"
test_video_url = {
    "row_data": "https://storage.googleapis.com/labelbox-datasets/video-sample-data/sample-video-2.mp4",
    "global_key": global_key
}
dataset = client.create_dataset(
    name="Video prediction demo",
    iam_integration=None # Removing this argument will default to the organization's default IAM integration
)
task = dataset.create_data_rows([test_video_url])
task.wait_till_done()
print("Errors: ", task.errors)
print("Failed data rows: ", task.failed_data_rows)

Step 2: Create/select an ontology for your model predictions

Your project should be set up with an ontology that contains all of the tools and classifications required by your predictions. The tool and classification names must match the name fields in your predictions so that the correct feature schemas are matched.

ontology_builder = lb.OntologyBuilder(
    tools=[
        lb.Tool(tool=lb.Tool.Type.BBOX, name="bbox_video"),
        lb.Tool(tool=lb.Tool.Type.POINT, name="point_video"),
        lb.Tool(tool=lb.Tool.Type.LINE, name="line_video_frame"),
        lb.Tool(
          tool=lb.Tool.Type.BBOX, name="bbox_class",
          classifications=[
            lb.Classification(
            class_type=lb.Classification.Type.CHECKLIST,
            name="checklist_class",
            scope = lb.Classification.Scope.INDEX, ## defined scope for frame classifications
            options=[ 
                lb.Option(value="first_checklist_answer"),
                lb.Option(value="second_checklist_answer")
            ]
            )
          ]
        )
    ],
    classifications=[ 
        lb.Classification(
            class_type=lb.Classification.Type.CHECKLIST,
            name="checklist_class",
            scope = lb.Classification.Scope.INDEX, ## defined scope for frame classifications
            options=[ 
                lb.Option(value="first_checklist_answer"),
                lb.Option(value="second_checklist_answer")
            ]
        ),
        lb.Classification(
            class_type=lb.Classification.Type.RADIO,
            name="radio_class",
            scope = lb.Classification.Scope.INDEX,
            options=[ 
                lb.Option(value="first_radio_answer"),
                lb.Option(value="second_radio_answer")
            ]
        ),
         lb.Classification(
              class_type=lb.Classification.Type.RADIO,
              name="nested_radio_question",
              options=[
                  lb.Option("first_radio_answer",
                        options=[
                            lb.Classification(
                                class_type=lb.Classification.Type.RADIO,
                                name="sub_radio_question",
                                options=[lb.Option("first_sub_radio_answer")]
                            )
                        ]
                  )
              ] 
        ),
        lb.Classification(
          class_type=lb.Classification.Type.CHECKLIST,
          name="nested_checklist_question",
          options=[
              lb.Option("first_checklist_answer",
                options=[
                  lb.Classification(
                      class_type=lb.Classification.Type.CHECKLIST,
                      name="sub_checklist_question",
                      options=[lb.Option("first_sub_checklist_answer")]
                  )
              ]
            )
          ]
        ),
        lb.Classification(
          class_type=lb.Classification.Type.RADIO, 
          name="radio_class_global",
          options=[ 
                lb.Option(value="first_radio_answer"),
                lb.Option(value="second_radio_answer")
            ]
        ),
        lb.Classification(
          class_type=lb.Classification.Type.CHECKLIST,
          name="checklist_class_global",
          options=[
                lb.Option(value="first_checklist_answer"),
                lb.Option(value="second_checklist_answer")
          ]
        ),
        lb.Classification(
          class_type=lb.Classification.Type.TEXT,
          name="free_text"
        )
    ]  
)

ontology = client.create_ontology("Ontology Video Annotations", 
                                  ontology_builder.asdict(), 
                                  media_type=lb.MediaType.Video
                                  )

Step 3: Create a Model and model run

# create Model
model = client.create_model(name="video_model_run_" + str(uuid.uuid4()), 
                            ontology_id=ontology.uid)
# create model run
model_run = model.create_model_run("iteration 1")

Step 4: Send data rows to the model run

model_run.upsert_data_rows(global_keys=[global_key])

Step 5: Create the predictions payload

Create the predictions payload using the code snippets from the sections above. You can build it with either Python annotation types or NDJSON.

label_predictions = []
annotations_list = [
    point_prediction,
    bbox_prediction,
    polyline_prediction, 
    checklist_prediction, 
    radio_prediction,
    nested_radio_prediction,
    nested_checklist_prediction,
    frame_bbox_with_checklist_subclass_prediction,
    global_radio_prediction,
    global_checklist_prediction,
    text_prediction
      ]

flatten_list_annotations = [ann for ann_sublist in annotations_list for ann in ann_sublist] 

label_predictions.append(
    lb_types.Label(
        data=lb_types.VideoData(global_key=global_key),
        annotations = flatten_list_annotations
    )
)

# If using NDJSON, reference the applicable data row by adding its global key to each prediction payload

label_prediction_ndjson = []

for annotation in [
    point_prediction_ndjson,
    bbox_prediction_ndjson,
    polyline_prediction_ndjson, 
    frame_checklist_classification_prediction_ndjson, 
    frame_radio_classification_prediction_ndjson,
    nested_radio_prediction_ndjson,
    nested_checklist_prediction_ndjson,
    frame_bbox_with_checklist_subclass_prediction_ndjson,
    global_radio_classification_ndjson,
    global_checklist_classification_ndjson,
    text_prediction_ndjson
]:      
  annotation.update({
      "dataRow": {
          "globalKey": global_key
      }
  })
  label_prediction_ndjson.append(annotation)
     

Step 6: Upload the predictions payload to the model run

# Upload the prediction label to the Model Run
upload_job_prediction = model_run.add_predictions(
    name="prediction_upload_job"+str(uuid.uuid4()),
    predictions=label_predictions)

# Errors will appear for annotation uploads that failed.
print("Errors:", upload_job_prediction.errors)
print("Status of uploads: ", upload_job_prediction.statuses)

Step 7: Send annotations to the model run (optional)

# 7.1 Create a Labelbox project
project = client.create_project(name="video_prediction_demo",
                                    media_type=lb.MediaType.Video)
project.setup_editor(ontology)

# 7.2 Create a batch to send to the project 
project.create_batch(
  "batch_video_prediction_demo", # Each batch in a project must have a unique name
  global_keys=[global_key], # A list of data rows, data row ids or global keys
  priority=5 # priority between 1 (highest) and 5 (lowest)
)

# 7.3 Create the annotations payload
# See here for more details: 
# https://docs.labelbox.com/reference/import-video-annotations#supported-annotations
checklist_annotation ...
radio_annotation ...
bbox_annotation ...
frame_bbox_with_checklist_subclass ...
point_annotation ...
polyline_annotation ...
global_checklist_annotation ...
global_radio_annotation ...
nested_checklist_annotation ...
nested_radio_annotation ...
text_annotation ...

# 7.4 Create a Label object by identifying the applicable data row in Labelbox and providing a list of annotations
labels = []
annotations_list = [
          checklist_annotation, 
          radio_annotation,
          bbox_annotation, 
          frame_bbox_with_checklist_subclass,
          point_annotation, 
          polyline_annotation,
          global_checklist_annotation,
          global_radio_annotation,
          nested_checklist_annotation,
          nested_radio_annotation,
          text_annotation
      ]

flatten_list_annotations = [ann for ann_sublist in annotations_list for ann in ann_sublist] 

labels.append(
    lb_types.Label(
        data=lb_types.VideoData(global_key=global_key),
        annotations = flatten_list_annotations
    )
)

# 7.5 Upload annotations to the project using Label Import 

upload_job_annotation = lb.LabelImport.create_from_objects(
    client = client,
    project_id = project.uid,
    name="video_annotations_import_" + str(uuid.uuid4()),
    labels=labels)

upload_job_annotation.wait_until_done()
# Errors will appear for annotation uploads that failed.
print("Errors:", upload_job_annotation.errors)
print("Status of uploads: ", upload_job_annotation.statuses)

# 7.6 Send the annotations to the Model Run

# Send the labels created in the project to the model run
model_run.upsert_labels(project_id=project.uid)