This guide shows how to upload predictions on video data to a model run, with sample upload formats for each supported prediction type.
Confidence scores are not supported for VideoObjectAnnotation objects. Nested ClassificationAnnotation objects inside of a VideoObjectAnnotation can still have a confidence score.
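As a stripped-down sketch of that rule (the variable name and coordinate values here are placeholders; the feature names reuse the bbox_class / checklist_class features defined in the ontology below), the outer object carries no confidence while its nested classification answer does:

example_bbox_with_confidence = lb_types.VideoObjectAnnotation(
    name="bbox_class",  # no confidence score on the object annotation itself
    keyframe=True,
    frame=1,
    segment_index=0,
    value=lb_types.Rectangle(
        start=lb_types.Point(x=0, y=0),
        end=lb_types.Point(x=10, y=10),
    ),
    classifications=[
        lb_types.ClassificationAnnotation(
            name="checklist_class",
            value=lb_types.Checklist(answer=[
                lb_types.ClassificationAnswer(
                    name="first_checklist_answer",
                    confidence=0.5,  # confidence is supported on the nested classification
                )
            ]),
        )
    ],
)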
# bbox dimensions
bbox_dm = {
    "top": 617,
    "left": 1371,
    "height": 419,
    "width": 505
}

bbox_prediction = [
    lb_types.VideoObjectAnnotation(
        name="bbox_video",
        keyframe=True,
        frame=13,
        segment_index=0,
        value=lb_types.Rectangle(
            start=lb_types.Point(x=bbox_dm["left"], y=bbox_dm["top"]),  # x = left, y = top
            end=lb_types.Point(x=bbox_dm["left"] + bbox_dm["width"], y=bbox_dm["top"] + bbox_dm["height"]),  # x = left + width, y = top + height
        )
    ),
    lb_types.VideoObjectAnnotation(
        name="bbox_video",
        keyframe=True,
        frame=15,
        segment_index=0,
        value=lb_types.Rectangle(
            start=lb_types.Point(x=bbox_dm["left"], y=bbox_dm["top"]),
            end=lb_types.Point(x=bbox_dm["left"] + bbox_dm["width"], y=bbox_dm["top"] + bbox_dm["height"]),
        )
    ),
    lb_types.VideoObjectAnnotation(
        name="bbox_video",
        keyframe=True,
        frame=19,
        segment_index=0,
        value=lb_types.Rectangle(
            start=lb_types.Point(x=bbox_dm["left"], y=bbox_dm["top"]),
            end=lb_types.Point(x=bbox_dm["left"] + bbox_dm["width"], y=bbox_dm["top"] + bbox_dm["height"]),
        )
    )
]
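The three keyframe entries above differ only in their frame values. Where many keyframes share one geometry, a small helper can build the list; this is a sketch using only the lb_types constructors shown above (the helper bbox_keyframes is hypothetical, not part of the Labelbox SDK):

def bbox_keyframes(name, frames, dims, segment_index=0):
    # Build one keyframe annotation per frame, all sharing the same rectangle.
    return [
        lb_types.VideoObjectAnnotation(
            name=name,
            keyframe=True,
            frame=frame,
            segment_index=segment_index,
            value=lb_types.Rectangle(
                start=lb_types.Point(x=dims["left"], y=dims["top"]),
                end=lb_types.Point(x=dims["left"] + dims["width"],
                                   y=dims["top"] + dims["height"]),
            ),
        )
        for frame in frames
    ]

# Equivalent to the bbox_prediction list above:
# bbox_prediction = bbox_keyframes("bbox_video", [13, 15, 19], bbox_dm)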
point_prediction = [
    lb_types.VideoObjectAnnotation(
        name="point_video",
        keyframe=True,
        frame=17,
        value=lb_types.Point(x=660.134, y=407.926),
    )
]

polyline_prediction = [
    lb_types.VideoObjectAnnotation(
        name="line_video_frame",
        keyframe=True,
        frame=5,
        segment_index=0,
        value=lb_types.Line(
            points=[lb_types.Point(x=680, y=100), lb_types.Point(x=100, y=190)]
        )
    ),
    lb_types.VideoObjectAnnotation(
        name="line_video_frame",
        keyframe=True,
        frame=12,
        segment_index=0,
        value=lb_types.Line(
            points=[lb_types.Point(x=680, y=100), lb_types.Point(x=100, y=190)]
        )
    ),
    lb_types.VideoObjectAnnotation(
        name="line_video_frame",
        keyframe=True,
        frame=20,
        segment_index=0,
        value=lb_types.Line(
            points=[lb_types.Point(x=680, y=100), lb_types.Point(x=100, y=190)]
        )
    ),
    lb_types.VideoObjectAnnotation(
        name="line_video_frame",
        keyframe=True,
        frame=24,
        segment_index=1,
        value=lb_types.Line(
            points=[lb_types.Point(x=680, y=100), lb_types.Point(x=100, y=190)]
        )
    ),
    lb_types.VideoObjectAnnotation(
        name="line_video_frame",
        keyframe=True,
        frame=45,
        segment_index=1,
        value=lb_types.Line(
            points=[lb_types.Point(x=680, y=100), lb_types.Point(x=100, y=190)]
        )
    )
]
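Note how segment_index groups keyframes: frames 5, 12, and 20 above share segment_index=0, while frames 24 and 45 use segment_index=1, so the polyline is imported as two separate keyframe segments on the video timeline.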
global_radio_prediction = [lb_types.ClassificationAnnotation(
    name="radio_class_global",
    value=lb_types.Radio(answer=lb_types.ClassificationAnswer(
        name="first_radio_answer",
        confidence=0.5
    ))
)]

radio_prediction = [
    lb_types.VideoClassificationAnnotation(
        name="radio_class",
        frame=9,
        segment_index=0,
        value=lb_types.Radio(answer=lb_types.ClassificationAnswer(
            name="first_radio_answer",
            confidence=0.5
        ))
    ),
    lb_types.VideoClassificationAnnotation(
        name="radio_class",
        frame=15,
        segment_index=0,
        value=lb_types.Radio(answer=lb_types.ClassificationAnswer(
            name="first_radio_answer",
            confidence=0.5
        ))
    )
]
global_checklist_prediction = [lb_types.ClassificationAnnotation(
    name="checklist_class_global",
    value=lb_types.Checklist(
        answer=[
            lb_types.ClassificationAnswer(
                name="first_checklist_answer",
                confidence=0.5
            ),
            lb_types.ClassificationAnswer(
                name="second_checklist_answer",
                confidence=0.5
            )
        ]
    )
)]

checklist_prediction = [
    lb_types.VideoClassificationAnnotation(
        name="checklist_class",
        frame=29,
        segment_index=0,
        value=lb_types.Checklist(
            answer=[
                lb_types.ClassificationAnswer(
                    name="first_checklist_answer",
                    confidence=0.5
                )
            ]
        )
    ),
    lb_types.VideoClassificationAnnotation(
        name="checklist_class",
        frame=35,
        segment_index=0,
        value=lb_types.Checklist(
            answer=[
                lb_types.ClassificationAnswer(
                    name="first_checklist_answer",
                    confidence=0.5
                )
            ]
        )
    ),
    lb_types.VideoClassificationAnnotation(
        name="checklist_class",
        frame=39,
        segment_index=1,
        value=lb_types.Checklist(
            answer=[
                lb_types.ClassificationAnswer(
                    name="second_checklist_answer",
                    confidence=0.5
                )
            ]
        )
    ),
    lb_types.VideoClassificationAnnotation(
        name="checklist_class",
        frame=45,
        segment_index=1,
        value=lb_types.Checklist(
            answer=[
                lb_types.ClassificationAnswer(
                    name="second_checklist_answer",
                    confidence=0.5
                )
            ]
        )
    )
]
nested_radio_prediction = [lb_types.ClassificationAnnotation(
    name="nested_radio_question",
    value=lb_types.Radio(
        answer=lb_types.ClassificationAnswer(
            name="first_radio_answer",
            confidence=0.5,
            classifications=[
                lb_types.ClassificationAnnotation(
                    name="sub_radio_question",
                    value=lb_types.Radio(
                        answer=lb_types.ClassificationAnswer(
                            name="first_sub_radio_answer",
                            confidence=0.5
                        )
                    )
                )
            ]
        )
    )
)]

nested_checklist_prediction = [lb_types.ClassificationAnnotation(
    name="nested_checklist_question",
    value=lb_types.Checklist(
        answer=[lb_types.ClassificationAnswer(
            name="first_checklist_answer",
            confidence=0.5,
            classifications=[
                lb_types.ClassificationAnnotation(
                    name="sub_checklist_question",
                    value=lb_types.Checklist(
                        answer=[lb_types.ClassificationAnswer(
                            name="first_sub_checklist_answer",
                            confidence=0.5
                        )]
                    )
                )
            ]
        )]
    )
)]
bbox_dm2 = {
    "top": 146.0,
    "left": 98.0,
    "height": 382.0,
    "width": 341.0
}

frame_bbox_with_checklist_subclass_prediction = [
    lb_types.VideoObjectAnnotation(
        name="bbox_class",
        keyframe=True,
        frame=10,
        segment_index=0,
        value=lb_types.Rectangle(
            start=lb_types.Point(x=bbox_dm2["left"], y=bbox_dm2["top"]),  # x = left, y = top
            end=lb_types.Point(x=bbox_dm2["left"] + bbox_dm2["width"], y=bbox_dm2["top"] + bbox_dm2["height"]),  # x = left + width, y = top + height
        )
    ),
    lb_types.VideoObjectAnnotation(
        name="bbox_class",
        keyframe=True,
        frame=11,
        segment_index=0,
        value=lb_types.Rectangle(
            start=lb_types.Point(x=bbox_dm2["left"], y=bbox_dm2["top"]),
            end=lb_types.Point(x=bbox_dm2["left"] + bbox_dm2["width"], y=bbox_dm2["top"] + bbox_dm2["height"]),
        ),
        classifications=[
            lb_types.ClassificationAnnotation(
                name="checklist_class",
                value=lb_types.Checklist(answer=[lb_types.ClassificationAnswer(
                    name="first_checklist_answer",
                    confidence=0.5
                )])
            )
        ]
    ),
    lb_types.VideoObjectAnnotation(
        name="bbox_class",
        keyframe=True,
        frame=13,
        segment_index=0,
        value=lb_types.Rectangle(
            start=lb_types.Point(x=bbox_dm2["left"], y=bbox_dm2["top"]),
            end=lb_types.Point(x=bbox_dm2["left"] + bbox_dm2["width"], y=bbox_dm2["top"] + bbox_dm2["height"]),
        ),
        classifications=[
            lb_types.ClassificationAnnotation(
                name="checklist_class",
                value=lb_types.Checklist(answer=[lb_types.ClassificationAnswer(
                    name="second_checklist_answer",
                    confidence=0.5
                )])
            )
        ]
    )
]

text_prediction = [lb_types.ClassificationAnnotation(
    name="free_text",  # must match your ontology feature's name
    value=lb_types.Text(answer="sample text", confidence=0.5)
)]
import labelbox as lb
import labelbox.types as lb_types
import uuid
Replace the value of API_KEY with a valid API key to connect to the Labelbox client.
API_KEY = None
client = lb.Client(API_KEY)

global_key = "sample-video-2.mp4"

test_img_url = {
    "row_data": "https://storage.googleapis.com/labelbox-datasets/video-sample-data/sample-video-2.mp4",
    "global_key": global_key
}

dataset = client.create_dataset(
    name="Video prediction demo",
    iam_integration=None  # Removing this argument will default to the organization's default IAM integration
)
task = dataset.create_data_rows([test_img_url])
task.wait_till_done()
print("Errors: ", task.errors)
print("Failed data rows: ", task.failed_data_rows)
The tool and classification name values in your ontology should match the name fields in your predictions to ensure the correct feature schemas are matched.
ontology_builder = lb.OntologyBuilder(
    tools=[
        lb.Tool(tool=lb.Tool.Type.BBOX, name="bbox_video"),
        lb.Tool(tool=lb.Tool.Type.POINT, name="point_video"),
        lb.Tool(tool=lb.Tool.Type.LINE, name="line_video_frame"),
        lb.Tool(
            tool=lb.Tool.Type.BBOX, name="bbox_class",
            classifications=[
                lb.Classification(
                    class_type=lb.Classification.Type.CHECKLIST,
                    name="checklist_class",
                    scope=lb.Classification.Scope.INDEX,  # defined scope for frame classifications
                    options=[
                        lb.Option(value="first_checklist_answer"),
                        lb.Option(value="second_checklist_answer")
                    ]
                )
            ]
        )
    ],
    classifications=[
        lb.Classification(
            class_type=lb.Classification.Type.CHECKLIST,
            name="checklist_class",
            scope=lb.Classification.Scope.INDEX,  # defined scope for frame classifications
            options=[
                lb.Option(value="first_checklist_answer"),
                lb.Option(value="second_checklist_answer")
            ]
        ),
        lb.Classification(
            class_type=lb.Classification.Type.RADIO,
            name="radio_class",
            scope=lb.Classification.Scope.INDEX,
            options=[
                lb.Option(value="first_radio_answer"),
                lb.Option(value="second_radio_answer")
            ]
        ),
        lb.Classification(
            class_type=lb.Classification.Type.RADIO,
            name="nested_radio_question",
            options=[
                lb.Option("first_radio_answer",
                    options=[
                        lb.Classification(
                            class_type=lb.Classification.Type.RADIO,
                            name="sub_radio_question",
                            options=[lb.Option("first_sub_radio_answer")]
                        )
                    ]
                )
            ]
        ),
        lb.Classification(
            class_type=lb.Classification.Type.CHECKLIST,
            name="nested_checklist_question",
            options=[
                lb.Option("first_checklist_answer",
                    options=[
                        lb.Classification(
                            class_type=lb.Classification.Type.CHECKLIST,
                            name="sub_checklist_question",
                            options=[lb.Option("first_sub_checklist_answer")]
                        )
                    ]
                )
            ]
        ),
        lb.Classification(
            class_type=lb.Classification.Type.RADIO,
            name="radio_class_global",
            options=[
                lb.Option(value="first_radio_answer"),
                lb.Option(value="second_radio_answer")
            ]
        ),
        lb.Classification(
            class_type=lb.Classification.Type.CHECKLIST,
            name="checklist_class_global",
            options=[
                lb.Option(value="first_checklist_answer"),
                lb.Option(value="second_checklist_answer")
            ]
        ),
        lb.Classification(
            class_type=lb.Classification.Type.TEXT,
            name="free_text"
        )
    ]
)
ontology = client.create_ontology("Ontology Video Annotations",
                                  ontology_builder.asdict(),
                                  media_type=lb.MediaType.Video)

# Create a Model
model = client.create_model(name="video_model_run_" + str(uuid.uuid4()),
                            ontology_id=ontology.uid)
# Create a Model Run
model_run = model.create_model_run("iteration 1")
model_run.upsert_data_rows(global_keys=[global_key])
label_predictions = []
annotations_list = [
    point_prediction,
    bbox_prediction,
    polyline_prediction,
    checklist_prediction,
    radio_prediction,
    nested_radio_prediction,
    nested_checklist_prediction,
    frame_bbox_with_checklist_subclass_prediction,
    global_radio_prediction,
    global_checklist_prediction,
    text_prediction
]
flatten_list_annotations = [ann for ann_sublist in annotations_list for ann in ann_sublist]

label_predictions.append(
    lb_types.Label(
        data={"global_key": global_key},
        annotations=flatten_list_annotations
    )
)
# Upload the prediction label to the Model Run
upload_job_prediction = model_run.add_predictions(
    name="prediction_upload_job" + str(uuid.uuid4()),
    predictions=label_predictions)
upload_job_prediction.wait_until_done()
# Errors will appear for prediction uploads that failed.
print("Errors:", upload_job_prediction.errors)
print("Status of uploads: ", upload_job_prediction.statuses)
# 7.1 Create a Labelbox project
project = client.create_project(name="video_prediction_demo",
                                media_type=lb.MediaType.Video)
project.connect_ontology(ontology)

# 7.2 Create a batch to send to the project
project.create_batch(
    "batch_video_prediction_demo",  # Each batch in a project must have a unique name
    global_keys=[global_key],  # A list of data rows, data row ids, or global keys
    priority=5  # priority between 1 (highest) and 5 (lowest)
)
# 7.3 Create the annotations payload
# See here for more details:
# /reference/import-video-annotations#supported-annotations
# Annotation payloads (definitions elided here; see the reference above)
checklist_annotation = ...
radio_annotation = ...
bbox_annotation = ...
frame_bbox_with_checklist_subclass = ...
point_annotation = ...
polyline_annotation = ...
global_checklist_annotation = ...
global_radio_annotation = ...
nested_checklist_annotation = ...
nested_radio_annotation = ...
text_annotation = ...
# 7.4 Create a Label object by identifying the applicable data row in Labelbox and providing a list of annotations
labels = []
annotations_list = [
    checklist_annotation,
    radio_annotation,
    bbox_annotation,
    frame_bbox_with_checklist_subclass,
    point_annotation,
    polyline_annotation,
    global_checklist_annotation,
    global_radio_annotation,
    nested_checklist_annotation,
    nested_radio_annotation,
    text_annotation
]
flatten_list_annotations = [ann for ann_sublist in annotations_list for ann in ann_sublist]

labels.append(
    lb_types.Label(
        data={"global_key": global_key},
        annotations=flatten_list_annotations
    )
)
# 7.5 Upload annotations to the project using Label Import
upload_job_annotation = lb.LabelImport.create_from_objects(
    client=client,
    project_id=project.uid,
    name="video_annotations_import_" + str(uuid.uuid4()),
    labels=labels)
upload_job_annotation.wait_until_done()
# Errors will appear for annotation uploads that failed.
print("Errors:", upload_job_annotation.errors)
print("Status of uploads: ", upload_job_annotation.statuses)
# 7.6 Send the annotations to the Model Run
# Attach the labels created in the project to the Model Run
model_run.upsert_labels(project_id=project.uid)
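upsert_labels pulls the labels submitted in the project into the Model Run as ground truth, so they can be compared against the predictions uploaded earlier.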