import requests
import json
instance_id = 'abcd.azuredatabricks.net'
api_version = '/api/2.0'
api_command = '/jobs/create'
url = f"https://{instance_id}{api_version}{api_command}"
headers = {'Authorization': 'Bearer myToken'}
params = {
    "settings": {
        "name": "Test API",
        "new_cluster": {
            "cluster_name": "",
            "spark_version": "8.2.x-scala2.12",
            "spark_conf": {
                "spark.databricks.delta.preview.enabled": "true",
                "spark.sql.shuffle.partitions": "1024"
            },
            "node_type_id": "Standard_D16s_v3",
            "spark_env_vars": {
                "PYSPARK_PYTHON": "/databricks/python3/bin/python3"
            },
            "enable_elastic_disk": True,
            "azure_attributes": {
                "first_on_demand": 1,
                "availability": "ON_DEMAND_AZURE",
                "spot_bid_max_price": -1.0
            },
            "num_workers": 8
        },
        "email_notifications": {
            "no_alert_for_skipped_runs": False
        },
        "timeout_seconds": 0,
        "notebook_task": {
            "notebook_path": "/Repos/abcd@repo.com/notebook",
            "base_parameters": {
                "start_date": "",
                "end_date": "",
                "client": "",
                "client_output_sa": "",
                "initial_telomere_load": "",
                "run_validation": "",
                "run_telomere": ""
            }
        },
        "max_concurrent_runs": 1,
        "format": "SINGLE_TASK"
    },
    "creator_user_name": "abcd@repo.com"
}
response = requests.post(url=url, params=params, headers=headers)
print(json.dumps(json.loads(response.text), indent=2))
I am executing the above code (with the real instance ID, bearer token, and paths) and I keep getting the following error:
{
"error_code": "INVALID_PARAMETER_VALUE",
"message": "Job settings must be specified."
}
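
My current suspicion, for anyone reading: requests.post(..., params=params) encodes the dictionary into the URL query string, so the request body arrives empty, which would explain why the API complains that no job settings were specified. Below is a minimal sketch of what I plan to try next, assuming /jobs/create expects the settings fields at the top level of a JSON body (no "settings" wrapper, and no "creator_user_name", which I believe only appears in /jobs/get responses):

import requests
import json

instance_id = 'abcd.azuredatabricks.net'  # placeholder, as above
url = f"https://{instance_id}/api/2.0/jobs/create"
headers = {'Authorization': 'Bearer myToken'}  # placeholder token

# Trimmed settings for the sketch; the full cluster/notebook spec from
# above would go here unchanged, just without the "settings" wrapper.
job_settings = {
    "name": "Test API",
    "new_cluster": {
        "spark_version": "8.2.x-scala2.12",
        "node_type_id": "Standard_D16s_v3",
        "num_workers": 8
    },
    "notebook_task": {
        "notebook_path": "/Repos/abcd@repo.com/notebook"
    },
    "max_concurrent_runs": 1,
    "format": "SINGLE_TASK"
}

# json= serializes the dict into the request body as JSON;
# params= would put it in the URL query string, leaving the body empty.
response = requests.post(url, json=job_settings, headers=headers)
print(json.dumps(response.json(), indent=2))

Equivalently, reusing the dict already defined above, the call would be requests.post(url, json=params["settings"], headers=headers).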