<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic All-purpose compute clusters that are attached to a pool are no longer able to switch to a different pool/change to a non-pool worker/driver. in Data Engineering</title>
    <link>https://community.databricks.com/t5/data-engineering/all-purpose-compute-clusters-that-are-attached-to-a-pool-are-no/m-p/22701#M15592</link>
    <description>&lt;P&gt;Would like to know if anyone else is experiencing this - we're seeing this across 5+ different Databricks workspaces in both AWS and Azure.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Reproduction: &lt;/P&gt;&lt;P&gt;Create all purpose compute cluster, attach it to existing pool, save and start cluster. &lt;/P&gt;&lt;P&gt;Edit cluster, change pool or change to an instance type worker and driver, click "Confirm". Notice that changes weren't applied.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Specific instance configuration we used:&lt;/P&gt;&lt;P&gt;&lt;span class="lia-inline-image-display-wrapper" image-alt="image"&gt;&lt;img src="https://community.databricks.com/t5/image/serverpage/image-id/1217iADDAB90A41069E49/image-size/large?v=v2&amp;amp;px=999" role="button" title="image" alt="image" /&gt;&lt;/span&gt;&lt;/P&gt;</description>
    <pubDate>Fri, 11 Nov 2022 20:39:38 GMT</pubDate>
    <dc:creator>JoeWMP</dc:creator>
    <dc:date>2022-11-11T20:39:38Z</dc:date>
    <item>
      <title>All-purpose compute clusters that are attached to a pool are no longer able to switch to a different pool/change to a non-pool worker/driver.</title>
      <link>https://community.databricks.com/t5/data-engineering/all-purpose-compute-clusters-that-are-attached-to-a-pool-are-no/m-p/22701#M15592</link>
      <description>&lt;P&gt;Would like to know if anyone else is experiencing this - we're seeing this across 5+ different Databricks workspaces in both AWS and Azure.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Reproduction: &lt;/P&gt;&lt;P&gt;Create all purpose compute cluster, attach it to existing pool, save and start cluster. &lt;/P&gt;&lt;P&gt;Edit cluster, change pool or change to an instance type worker and driver, click "Confirm". Notice that changes weren't applied.&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;Specific instance configuration we used:&lt;/P&gt;&lt;P&gt;&lt;span class="lia-inline-image-display-wrapper" image-alt="image"&gt;&lt;img src="https://community.databricks.com/t5/image/serverpage/image-id/1217iADDAB90A41069E49/image-size/large?v=v2&amp;amp;px=999" role="button" title="image" alt="image" /&gt;&lt;/span&gt;&lt;/P&gt;</description>
      <pubDate>Fri, 11 Nov 2022 20:39:38 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/all-purpose-compute-clusters-that-are-attached-to-a-pool-are-no/m-p/22701#M15592</guid>
      <dc:creator>JoeWMP</dc:creator>
      <dc:date>2022-11-11T20:39:38Z</dc:date>
    </item>
    <item>
      <title>Re: All-purpose compute clusters that are attached to a pool are no longer able to switch to a different pool/change to a non-pool worker/driver.</title>
      <link>https://community.databricks.com/t5/data-engineering/all-purpose-compute-clusters-that-are-attached-to-a-pool-are-no/m-p/22702#M15593</link>
      <description>&lt;P&gt;We're also seeing the same behavior when trying to change the pool on an all-purpose cluster using Terraform and Databricks Labs Terraform provider as well. The Terraform apply will go through and say the cluster was updated to the new pool id, but the changes have not actually been applied.&lt;/P&gt;</description>
      <pubDate>Fri, 11 Nov 2022 20:52:16 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/all-purpose-compute-clusters-that-are-attached-to-a-pool-are-no/m-p/22702#M15593</guid>
      <dc:creator>JoeWMP</dc:creator>
      <dc:date>2022-11-11T20:52:16Z</dc:date>
    </item>
  </channel>
</rss>

