We've been hitting an error on a Delta Live Tables pipeline refresh since yesterday. Nothing has changed on our side, yet the update fails with what looks like a configuration error:
{
  ...
  "timestamp": "2024-04-08T23:00:10.630Z",
  "message": "Update b60485 is FAILED.",
  "level": "ERROR",
  "error": {
    "exceptions": [
      {
        "message": "INVALID_PARAMETER_VALUE: \nInvalid tags - keys must be unique, but the following keys are specified multiple\ntimes: ResourceClass.\n "
      }
    ],
    "fatal": true
  }
  ...
}
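For context, this is roughly how we pulled that event entry, as a minimal sketch against the Pipelines REST API's list-events endpoint (GET /api/2.0/pipelines/{pipeline_id}/events); the workspace host, pipeline ID, and token env var are placeholders for our real values:

```python
import os
import requests

# Sketch: list recent DLT event log entries for the pipeline and print the errors.
# Host, pipeline ID, and DATABRICKS_TOKEN are placeholders, not our actual values.
host = "https://<workspace>.cloud.databricks.com"
pipeline_id = "<snip>"

resp = requests.get(
    f"{host}/api/2.0/pipelines/{pipeline_id}/events",
    headers={"Authorization": f"Bearer {os.environ['DATABRICKS_TOKEN']}"},
    params={"max_results": 25},
)
resp.raise_for_status()

for event in resp.json().get("events", []):
    if event.get("level") == "ERROR":
        print(event["timestamp"], event["message"])
```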
As you can see, that isn't the case: ResourceClass appears exactly once in the pipeline configuration below (we also verified this programmatically; see the snippet after the config), so we're pretty stumped as to what's going on:
{
  "id": "<snip>",
  "pipeline_type": "WORKSPACE",
  "clusters": [
    {
      "label": "default",
      "spark_conf": {
        "spark.hadoop.fs.s3a.acl.default": "BucketOwnerFullControl",
        "spark.databricks.cluster.profile": "singleNode",
        "spark.master": "local[*, 4]"
      },
      "aws_attributes": {
        "first_on_demand": 1,
        "zone_id": "us-east-1f",
        "instance_profile_arn": "<snip>"
      },
      "node_type_id": "r5.2xlarge",
      "driver_node_type_id": "r5.2xlarge",
      "custom_tags": {
        "ResourceClass": "SingleNode"
      },
      "cluster_log_conf": {
        "dbfs": {
          "destination": "dbfs:/cluster-logs"
        }
      },
      "num_workers": 0
    }
  ],
  "development": false,
  "continuous": false,
  "channel": "CURRENT",
  "photon": false,
  "libraries": [
    {
      "notebook": {
        "path": "<snip>"
      }
    }
  ],
  "name": "<snip>",
  "edition": "PRO",
  "storage": "<snip>",
  "configuration": {
    "db_source_bucket": "<snip>"
  },
  "target": "production"
}
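Here is the duplicate-key check we ran over the raw spec, as a minimal sketch (pipeline.json is a hypothetical local copy of the config above). Note that Python's json.loads silently keeps only the last value when a key repeats, so an object_pairs_hook is needed to actually catch duplicates:

```python
import json
from collections import Counter

def detect_duplicates(pairs):
    """object_pairs_hook that raises if any key repeats within one JSON object."""
    counts = Counter(key for key, _ in pairs)
    dupes = [key for key, n in counts.items() if n > 1]
    if dupes:
        raise ValueError(f"duplicate keys: {dupes}")
    return dict(pairs)

# pipeline.json is a local copy of the spec shown above (hypothetical path).
with open("pipeline.json") as f:
    json.loads(f.read(), object_pairs_hook=detect_duplicates)
print("no duplicate keys found in the spec")
```

This passes cleanly on our spec, which is what makes the error so confusing.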
Does anyone have thoughts? Thanks!