<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic DLT bronze tables in Data Engineering</title>
    <link>https://community.databricks.com/t5/data-engineering/dlt-bronze-tables/m-p/48940#M28431</link>
    <description>&lt;P&gt;I am trying to ingest incremental parquet files data to bronze streaming table, how much history data should be retained ideally in bronze layer as a general best practice considering I will be only using bronze to ingest source data and move it to silver streaming tables using APPLY_CHANGES_INTO?&lt;/P&gt;</description>
    <pubDate>Wed, 11 Oct 2023 11:21:52 GMT</pubDate>
    <dc:creator>Faisal</dc:creator>
    <dc:date>2023-10-11T11:21:52Z</dc:date>
    <item>
      <title>DLT bronze tables</title>
      <link>https://community.databricks.com/t5/data-engineering/dlt-bronze-tables/m-p/48940#M28431</link>
      <description>&lt;P&gt;I am trying to ingest incremental parquet files data to bronze streaming table, how much history data should be retained ideally in bronze layer as a general best practice considering I will be only using bronze to ingest source data and move it to silver streaming tables using APPLY_CHANGES_INTO?&lt;/P&gt;</description>
      <pubDate>Wed, 11 Oct 2023 11:21:52 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/dlt-bronze-tables/m-p/48940#M28431</guid>
      <dc:creator>Faisal</dc:creator>
      <dc:date>2023-10-11T11:21:52Z</dc:date>
    </item>
    <item>
      <title>Re: DLT bronze tables</title>
      <link>https://community.databricks.com/t5/data-engineering/dlt-bronze-tables/m-p/50638#M28853</link>
      <description>&lt;P class=""&gt;The amount of history data that should be retained in the bronze layer depends on your specific use case and requirements. As a general best practice, you should retain enough history data to support your downstream analytics and machine learning workloads, while also considering the cost and performance implications of storing and processing large amounts of data.&lt;/P&gt;
&lt;P class=""&gt;One approach to managing historical data in the bronze layer is to use partitioning and time-based data retention policies. For example, you can partition your data by date or time, and then use a retention policy to automatically delete or archive old partitions after a certain period of time. This can help you manage the size of your data lake and reduce storage costs, while still retaining enough historical data to support your use cases.&lt;/P&gt;</description>
      <pubDate>Wed, 08 Nov 2023 12:21:11 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/dlt-bronze-tables/m-p/50638#M28853</guid>
      <dc:creator>MuthuLakshmi</dc:creator>
      <dc:date>2023-11-08T12:21:11Z</dc:date>
    </item>
  </channel>
</rss>

