<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic UC_COMMAND_NOT_SUPPORTED.WITHOUT_RECOMMENDATION in shared access mode? in Get Started Discussions</title>
    <link>https://community.databricks.com/t5/get-started-discussions/uc-command-not-supported-without-recommendation-in-shared-access/m-p/75109#M7534</link>
    <description>&lt;P&gt;I'm using a shared access cluster and am getting this error while trying to upload to Qdrant: [UC_COMMAND_NOT_SUPPORTED.WITHOUT_RECOMMENDATION] The command(s): AppendData are not supported in Unity Catalog. Any way to make this work on the shared cluster? It works on my personal cluster.&lt;/P&gt;</description>
    <pubDate>Thu, 20 Jun 2024 07:01:45 GMT</pubDate>
    <dc:creator>Awoke101</dc:creator>
    <dc:date>2024-06-20T07:01:45Z</dc:date>
    <item>
      <title>UC_COMMAND_NOT_SUPPORTED.WITHOUT_RECOMMENDATION in shared access mode?</title>
      <link>https://community.databricks.com/t5/get-started-discussions/uc-command-not-supported-without-recommendation-in-shared-access/m-p/75109#M7534</link>
      <description>&lt;P&gt;I'm using a shared access cluster and am getting this error while trying to upload to Qdrant.&lt;/P&gt;&lt;LI-CODE lang="python"&gt;#embeddings_df = embeddings_df.limit(5)

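# Write options for the Qdrant Spark connector: "vector_fields" names the
# DataFrame column holding the embedding, "vector_names" the named vector
# it maps to in the Qdrant collection.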
options = {
    "qdrant_url": QDRANT_GRPC_URL,
    "api_key": QDRANT_API_KEY,
    "collection_name": QDRANT_COLLECTION_NAME,
    "vector_fields": "dense_vector",
    "vector_names": "dense",
    "schema": embeddings_df.schema.json(),
    "batch_size":"128",
}

embeddings_df.write.format("io.qdrant.spark.Qdrant").options(**options).mode(
    "append"
).save()&lt;/LI-CODE&gt;&lt;LI-CODE lang="markup"&gt;[UC_COMMAND_NOT_SUPPORTED.WITHOUT_RECOMMENDATION] The command(s): AppendData are not supported in Unity Catalog.  SQLSTATE: 0AKUC
File &amp;lt;command-2604385794497207&amp;gt;, line 65
     48 #embeddings_df = embeddings_df.limit(5)
     50 options = {
     51     "qdrant_url": QDRANT_GRPC_URL,
     52     "api_key": QDRANT_API_KEY,
   (...)
     60     "batch_size":"128",
     61 }
     63 embeddings_df.write.format("io.qdrant.spark.Qdrant").options(**options).mode(
     64     "append"
---&amp;gt; 65 ).save()

File /databricks/spark/python/pyspark/sql/connect/readwriter.py:670, in DataFrameWriter.save(self, path, format, mode, partitionBy, **options)
    668     self.format(format)
    669 self._write.path = path
--&amp;gt; 670 self._spark.client.execute_command(
    671     self._write.command(self._spark.client), self._write.observations
    672 )
File /databricks/spark/python/pyspark/sql/connect/client/core.py:1203, in SparkConnectClient.execute_command(self, command, observations, extra_request_metadata)
   1201     req.user_context.user_id = self._user_id
   1202 req.plan.command.CopyFrom(command)
-&amp;gt; 1203 data, _, _, _, properties = self._execute_and_fetch(
   1204     req, observations or {}, extra_request_metadata
   1205 )
   1206 if data is not None:
   1207     return (data.to_pandas(), properties)
File /databricks/spark/python/pyspark/sql/connect/client/core.py:1624, in SparkConnectClient._execute_and_fetch(self, req, observations, extra_request_metadata, self_destruct)
   1621 schema: Optional[StructType] = None
   1622 properties: Dict[str, Any] = {}
-&amp;gt; 1624 for response in self._execute_and_fetch_as_iterator(
   1625     req, observations, extra_request_metadata or []
   1626 ):
   1627     if isinstance(response, StructType):
   1628         schema = response
File /databricks/spark/python/pyspark/sql/connect/client/core.py:1601, in SparkConnectClient._execute_and_fetch_as_iterator(self, req, observations, extra_request_metadata)
   1599                     yield from handle_response(b)
   1600 except Exception as error:
-&amp;gt; 1601     self._handle_error(error)
File /databricks/spark/python/pyspark/sql/connect/client/core.py:1910, in SparkConnectClient._handle_error(self, error)
   1908 self.thread_local.inside_error_handling = True
   1909 if isinstance(error, grpc.RpcError):
-&amp;gt; 1910     self._handle_rpc_error(error)
   1911 elif isinstance(error, ValueError):
   1912     if "Cannot invoke RPC" in str(error) and "closed" in str(error):
File /databricks/spark/python/pyspark/sql/connect/client/core.py:1985, in SparkConnectClient._handle_rpc_error(self, rpc_error)
   1982             info = error_details_pb2.ErrorInfo()
   1983             d.Unpack(info)
-&amp;gt; 1985             raise convert_exception(
   1986                 info,
   1987                 status.message,
   1988                 self._fetch_enriched_error(info),
   1989                 self._display_server_stack_trace(),
   1990             ) from None
   1992     raise SparkConnectGrpcException(status.message) from None&lt;/LI-CODE&gt;&lt;P&gt;Is there any way to make this work on the shared cluster? It works on my personal cluster.&lt;/P&gt;
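&lt;P&gt;From the error, it looks like shared access mode is blocking the connector's DataSourceV2 AppendData command, which single-user mode allows. As a fallback, I'm considering bypassing the Spark connector and upserting through the qdrant-client Python package from the driver. This is only a sketch: it assumes qdrant-client is installed on the cluster and that the payload column is named "text" (adjust to the real schema):&lt;/P&gt;&lt;LI-CODE lang="python"&gt;# Sketch only: bypass the connector's AppendData write and upsert directly
# with qdrant-client (%pip install qdrant-client -- an assumption, it is not
# part of the Spark connector). toPandas() pulls rows to the driver, so this
# is only sensible for modest DataFrame sizes.
import uuid

from qdrant_client import QdrantClient
from qdrant_client.models import PointStruct

# prefer_grpc lets the client use the gRPC endpoint behind QDRANT_GRPC_URL
client = QdrantClient(url=QDRANT_GRPC_URL, api_key=QDRANT_API_KEY, prefer_grpc=True)

# "text" is a hypothetical payload column; substitute the actual column(s)
pdf = embeddings_df.select("dense_vector", "text").toPandas()

points = [
    PointStruct(
        id=str(uuid.uuid4()),                      # one generated id per row
        vector={"dense": list(row.dense_vector)},  # named vector, matching "vector_names"
        payload={"text": row.text},
    )
    for row in pdf.itertuples(index=False)
]

# Upsert in chunks of 128 to mirror the connector's batch_size option
for start in range(0, len(points), 128):
    client.upsert(
        collection_name=QDRANT_COLLECTION_NAME,
        points=points[start : start + 128],
    )&lt;/LI-CODE&gt;</description>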
      <pubDate>Thu, 20 Jun 2024 07:01:45 GMT</pubDate>
      <guid>https://community.databricks.com/t5/get-started-discussions/uc-command-not-supported-without-recommendation-in-shared-access/m-p/75109#M7534</guid>
      <dc:creator>Awoke101</dc:creator>
      <dc:date>2024-06-20T07:01:45Z</dc:date>
    </item>
  </channel>
</rss>

