The Delta Live Tables user guide at https://docs.databricks.com/data-engineering/delta-live-tables/delta-live-tables-user-guide.html#mix... has an example in which
the first two functions load data incrementally (as streams) and the last one fully recomputes its result on each update.
@dlt.view
def incremental_bronze():
    """Ingest raw JSON files from S3 as a streaming (incremental) view.

    Uses the cloudFiles (Auto Loader) streaming source, so this view is
    updated incrementally as new files land in the source path.
    """
    # Build the streaming reader first, then point it at the raw data path.
    raw_reader = (
        spark.readStream.format("cloudFiles")
        .option("cloudFiles.format", "json")
    )
    return raw_reader.load("s3://path/to/raw/data")
@dlt.table
def incremental_silver():
    """Filter the bronze view into a silver table, updated incrementally.

    Because the bronze view is consumed via ``dlt.read_stream``, this table
    is also maintained incrementally rather than fully recomputed.
    """
    # NOTE: `.where(...)` is a placeholder predicate in this example.
    bronze_stream = dlt.read_stream("incremental_bronze")
    return bronze_stream.where(...)
@dlt.table
def complete_gold():
    """Aggregate row counts per ``user_id`` over the whole silver table.

    ``dlt.read`` performs a batch (complete) read, so this gold table is
    recomputed in full every time the pipeline updates.
    """
    silver = dlt.read("incremental_silver")
    return silver.groupBy("user_id").count()