Export prompt and response data

How to export prompt and response data

Open this Colab for an interactive tutorial on exporting annotations.

📘 Deprecating Export V1

Starting in April 2024, we will begin sunsetting Export V1 for customers on a rolling basis. For more details, see this guide.

Export JSON annotations

# Set the export params to include/exclude certain fields.
# Each field set to True below is included in the export output.
# Assumes `project` is a Project object, e.g. fetched with client.get_project().
export_params = {
  "attachments": True,
  "metadata_fields": True,
  "data_row_details": True,
  "project_details": True,
  "label_details": True,
  "performance_details": True
}

# You can set a range for last_activity_at and label_created_at.
# last_activity_at captures the creation and modification of labels, metadata, status, comments, and reviews.
# Note: Filters are combined with AND logic, so a single filter is usually sufficient.

filters = {
  "last_activity_at": ["2000-01-01 00:00:00", "2050-01-01 00:00:00"],
}

export_task = project.export_v2(params=export_params, filters=filters)
export_task.wait_till_done()

if export_task.errors:
  print(export_task.errors)

export_json = export_task.result
print("results: ", export_json)
# Alternative: streamable export via project.export()
import labelbox as lb

# Assumes `client` is an lb.Client and `project` is a Project object fetched from it.

# Set the export params to include/exclude certain fields.
export_params = {
  "attachments": True,
  "metadata_fields": True,
  "data_row_details": True,
  "project_details": True,
  "label_details": True,
  "performance_details": True,
  "interpolated_frames": True
}

# Note: Filters follow AND logic, so typically using one filter is sufficient.
filters = {
  "last_activity_at": ["2000-01-01 00:00:00", "2050-01-01 00:00:00"],
  "workflow_status": "<wkf-status>"
}

client.enable_experimental = True

export_task = project.export(params=export_params, filters=filters)
export_task.wait_till_done()


# Return JSON output strings from export task results/errors, one by one:

# Callback used for JSON Converter
def json_stream_handler(output: lb.JsonConverterOutput):
  print(output.json_str)


if export_task.has_errors():
  export_task.get_stream(
    converter=lb.JsonConverter(),
    stream_type=lb.StreamType.ERRORS
  ).start(stream_handler=lambda error: print(error))

if export_task.has_result():
  export_task.get_stream(
    converter=lb.JsonConverter(),
    stream_type=lb.StreamType.RESULT
  ).start(stream_handler=json_stream_handler)

print("file size: ", export_task.get_total_file_size(stream_type=lb.StreamType.RESULT))
print("line count: ", export_task.get_total_lines(stream_type=lb.StreamType.RESULT))
     

Prompt export format

Humans generate prompts

{
  "feature_id": "cldne96y301wy13yd0wp5z87y",
  "feature_schema_id": "cljg9my6h01000741aemmcln8",
  "name": "sample_name",
  "text_answer": {
    "content": "sample text"
  }
}

The same prompt in the legacy Export V1 format:

{
  "featureId": "cknp3dugp00073g68fkudn092",
  "schemaId": "cljg9my6h01000741aemmcln8",
  "title": "sample_name",
  "value": "sample_name",
  "answer": "sample text"
}
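
To read the prompt back out of an exported label, look for the text_answer field on the classification. The helper below is a minimal sketch (get_prompt_text is a hypothetical name; classification is assumed to be one entry from the label's classifications list):

# Minimal sketch: pull the prompt text out of one exported classification dict.
def get_prompt_text(classification):
  text_answer = classification.get("text_answer")
  return text_answer["content"] if text_answer else None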

Response export formats

Response - Radio

{
  "feature_id": "cldne96y201wq13ydu0qcc2up",
  "feature_schema_id": "cljggfygv0chn070w1v131s3v",
  "name": "sample_radio_name",
  "radio_answer": {
    "feature_id": "cldne96y201wr13yd23kr1pcr",
    "feature_schema_id": "cljgggcs200083b6lqbpml10p",
    "name": "first_radio_answer",
    "classifications": []
  }
}

Response - Checklist

{
  "feature_id": "cldne96y201wu13ydohrclpra",
  "feature_schema_id": "cljgegd1p07m4073cfmfy5xkx",
  "name": "checklist_question",
  "checklist_answers": [
    {
      "feature_id": "cldne96y301wv13ydatuxugbt",
      "name": "first_checklist_answer",
      "classifications": []
    },
    {
      "feature_id": "cldne96y301ww13yds4zkk49u",
      "name": "second_checklist_answer",
      "classifications": []
    },
    {
      "feature_id": "cldne96y301wx13ydvb5x2w6o",
      "name": "third_checklist_answer",
      "classifications": []
    }
  ]
}

Response - Text

{
  "feature_id": "cldne96y301wy13yd0wp5z87y",
  "feature_schema_id": "cljg9my6h01000741aemmcln8",
  "name": "sample_name",
  "text_answer": {
    "content": "sample text"
  }
}
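
All three response formats can be normalized with one small helper that checks which answer key is present. A minimal sketch (get_response_value is a hypothetical name; classification is one entry from the exported classifications list):

# Minimal sketch: normalize an exported response classification into a plain value.
def get_response_value(classification):
  if "radio_answer" in classification:
    return classification["radio_answer"]["name"]
  if "checklist_answers" in classification:
    return [answer["name"] for answer in classification["checklist_answers"]]
  if "text_answer" in classification:
    return classification["text_answer"]["content"]
  return None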

Sample project export

{
  "data_row": {
    "id": "clpvnouh04uyy0723mmru42qn",
    "global_key": "clpvnou2v03js07xsghfo2nzc",
    "row_data": "{\"type\":\"application/llm.prompt-response-creation\",\"version\":1}",
    "details": {
      "dataset_id": "clpvnou0z004c0724pd4cmw8g",
      "dataset_name": "test-humans-generate-prompts-reponses-dataset",
      "created_at": "2023-12-07T20:34:06.540+00:00",
      "updated_at": "2023-12-07T20:34:06.815+00:00",
      "last_activity_at": "2024-04-10T15:21:31.000+00:00",
      "created_by": "[email protected]"
    }
  },
  "media_attributes": {
    "mime_type": "application/llm.prompt-response-creation"
  },
  "attachments": [],
  "metadata_fields": [],
  "projects": {
    "clpvnotzb03jo07xs48r7ewka": {
      "name": "Andrea-test-humans-generate-prompts-responses",
      "labels": [
        {
          "label_kind": "Default",
          "version": "1.0.0",
          "id": "clutyida4041a07h4a8ojbu1g",
          "label_details": {
            "created_at": "2024-04-10T15:21:31.000+00:00",
            "updated_at": "2024-04-10T15:21:31.000+00:00",
            "created_by": "[email protected]",
            "content_last_updated_at": "2024-04-10T15:21:31.137+00:00",
            "reviews": []
          },
          "performance_details": {
            "seconds_to_create": 28,
            "seconds_to_review": 0,
            "skipped": false
          },
          "annotations": {
            "objects": [],
            "classifications": [
              {
                "feature_id": "clutykado00013b6rw65zj51e",
                "feature_schema_id": "clutyjef000xk07wfeurhc2qb",
                "name": "Is this a shirt?",
                "value": "Is this a shirt?",
                "text_answer": {
                  "content": "Potentially this is a shirt, but keep in mind this is not a good quality prompt"
                }
              },
              {
                "feature_id": "clutykap700033b6rmmlkpo6u",
                "feature_schema_id": "clutyjef000xm07wf5ym80ud6",
                "name": "Yes",
                "value": "yes",
                "radio_answer": {
                  "feature_id": "clutykap700023b6rhktauyzc",
                  "feature_schema_id": "clutyjef000xn07wf6smvc54b",
                  "name": "Red shirt",
                  "value": "red_shirt",
                  "classifications": []
                }
              }
            ],
            "relationships": []
          }
        }
      ],
      "project_details": {
        "ontology_id": "clpvnqsqz01kv07zydvbscxzq",
        "task_id": "14b02ec0-71f3-4d1f-b720-c6318e9a9346",
        "task_name": "Initial review task",
        "batch_id": "00870bc0-9540-11ee-a202-8d3b90bd1707",
        "batch_name": "batch_clpvnotzb03jo07xs48r7ewka",
        "workflow_status": "IN_REVIEW",
        "priority": 5,
        "consensus_expected_label_count": 2,
        "workflow_history": [
          {
            "action": "Move",
            "created_at": "2024-04-10T15:21:31.517+00:00",
            "created_by": "[email protected]",
            "previous_task_name": "Initial labeling task",
            "previous_task_id": "1d0062f9-dfd9-0f86-baed-823235868a8c",
            "next_task_name": "Initial review task",
            "next_task_id": "14b02ec0-71f3-4d1f-b720-c6318e9a9346"
          },
          {
            "action": "Move",
            "created_at": "2024-04-10T15:21:31.506+00:00",
            "created_by": "[email protected]",
            "next_task_name": "Initial labeling task",
            "next_task_id": "1d0062f9-dfd9-0f86-baed-823235868a8c"
          }
        ]
      }
    }
  }
}
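
Putting it together, the sketch below walks one exported data row shaped like the sample above and prints each classification name with its normalized value (it reuses the hypothetical get_response_value helper from the previous section):

# Minimal sketch: walk one exported data row and print its classifications.
def print_row_classifications(data_row_export):
  for project_id, project in data_row_export["projects"].items():
    for label in project["labels"]:
      for classification in label["annotations"]["classifications"]:
        print(project_id, classification["name"], get_response_value(classification))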