The code :
import dlt
from pyspark.sql.functions import col
@dlt.table(
name="silver_customers",
comment="Cleaned customers data from bronze"
)
@dlt.expect("valid_email", "email IS NOT NULL")
@dlt.expect("valid_customer_id", "customer_id IS NOT NULL")
def silver_customers():
return (
spark.read.table("bronze.demo.customers")
.dropDuplicates(["customer_id"])
.select(
"customer_id",
"name",
"email"
)
) and the UI i have
this table run incrementally?
Can some one confirm this ?