<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>Large datasets in Databricks (Generative AI)</title>
    <link>https://community.databricks.com/t5/generative-ai/large-datasets-in-databricks/m-p/100516#M651</link>
    <description>&lt;P&gt;How can I efficiently handle large datasets in Databricks when performing group-by operations to avoid out-of-memory errors? Are there any best practices or optimizations for improving performance, such as partitioning or caching, especially when working with Spark DataFrames?&lt;/P&gt;</description>
    <pubDate>Sun, 01 Dec 2024 05:30:40 GMT</pubDate>
    <dc:creator>maltasa</dc:creator>
    <dc:date>2024-12-01T05:30:40Z</dc:date>
    <item>
      <title>Large datasets in Databricks</title>
      <link>https://community.databricks.com/t5/generative-ai/large-datasets-in-databricks/m-p/100516#M651</link>
      <description>&lt;P&gt;How can I efficiently handle large datasets in Databricks when performing group-by operations to avoid out-of-memory errors? Are there any best practices or optimizations for improving performance, such as partitioning or caching, especially when working with Spark DataFrames?&lt;/P&gt;</description>
      <pubDate>Sun, 01 Dec 2024 05:30:40 GMT</pubDate>
      <guid>https://community.databricks.com/t5/generative-ai/large-datasets-in-databricks/m-p/100516#M651</guid>
      <dc:creator>maltasa</dc:creator>
      <dc:date>2024-12-01T05:30:40Z</dc:date>
    </item>
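    <!--
      A minimal PySpark sketch for the question above, assuming a hypothetical
      table "sales" with columns "region" and "amount" (the table and column
      names are illustrative, not from this thread). Wide aggregations shuffle
      data across the cluster; enabling Adaptive Query Execution keeps shuffle
      partitions sized sensibly, and writing the result out rather than
      collecting it to the driver avoids driver out-of-memory errors.

      from pyspark.sql import SparkSession
      from pyspark.sql import functions as F

      spark = SparkSession.builder.appName("large-groupby").getOrCreate()

      # Let Adaptive Query Execution size shuffle partitions at runtime,
      # coalescing the small ones produced by the groupBy shuffle.
      spark.conf.set("spark.sql.adaptive.enabled", "true")
      spark.conf.set("spark.sql.adaptive.coalescePartitions.enabled", "true")

      df = spark.table("sales")  # hypothetical source table

      # groupBy pre-aggregates within each partition before shuffling, so the
      # aggregation stays distributed; avoid collect() on large results.
      totals = (
          df.groupBy("region")
            .agg(F.sum("amount").alias("total_amount"),
                 F.count("*").alias("n_rows"))
      )

      # Write the result out instead of pulling it to the driver.
      totals.write.mode("overwrite").saveAsTable("sales_totals_by_region")
    -->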
    <item>
      <title>Re: Large datasets in Databricks</title>
      <link>https://community.databricks.com/t5/generative-ai/large-datasets-in-databricks/m-p/100633#M652</link>
      <description>&lt;P&gt;Hi &lt;a href="https://community.databricks.com/t5/user/viewprofilepage/user-id/134438"&gt;@maltasa&lt;/a&gt;,&lt;/P&gt;&lt;P&gt;I believe this article might help answer your question:&lt;/P&gt;&lt;P&gt;&lt;a href="https://www.databricks.com/discover/pages/optimize-data-workloads-guide" target="_self"&gt;Comprehensive Guide to Optimize Databricks, Spark and Delta Lake Workloads&lt;/a&gt;&lt;/P&gt;</description>
      <pubDate>Mon, 02 Dec 2024 14:01:42 GMT</pubDate>
      <guid>https://community.databricks.com/t5/generative-ai/large-datasets-in-databricks/m-p/100633#M652</guid>
      <dc:creator>Takuya-Omi</dc:creator>
      <dc:date>2024-12-02T14:01:42Z</dc:date>
    </item>
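    <!--
      The guide linked in the reply above covers partitioning, caching, and
      file layout in depth; the sketch below illustrates two of those ideas
      under stated assumptions (the "events" table and "event_date" column are
      illustrative names, not from this thread).

      from pyspark.sql import SparkSession

      spark = SparkSession.builder.getOrCreate()

      df = spark.table("events")  # hypothetical source table

      # Cache only a DataFrame that several actions reuse within one job,
      # and release it afterwards so executor memory is freed.
      recent = df.where("event_date >= '2024-01-01'").cache()
      recent.count()                               # first action materializes the cache
      recent.groupBy("event_date").count().show()  # reuses the cached data
      recent.unpersist()

      # Partition the written table by a low-cardinality column that queries
      # filter on, so later reads can prune entire partitions.
      (df.write
         .partitionBy("event_date")
         .mode("overwrite")
         .saveAsTable("events_by_date"))
    -->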
  </channel>
</rss>