<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic ai_query and cached tokens in Generative AI</title>
    <link>https://community.databricks.com/t5/generative-ai/ai-query-and-cached-tokens/m-p/153984#M1743</link>
    <description>&lt;P&gt;Is ai_query actually able to use OpenAI's cached tokens? I was not able to prove it. The response object from ai_query does not contain the raw response, and when I re-run an identical request via OpenAI SDK (identical model, settings etc.) and examine the response, cached_tokens = 0, which indicates that caching does not work in this setup, for whatever reason.&lt;/P&gt;</description>
    <pubDate>Thu, 09 Apr 2026 19:30:11 GMT</pubDate>
    <dc:creator>samuel86</dc:creator>
    <dc:date>2026-04-09T19:30:11Z</dc:date>
    <item>
      <title>ai_query and cached tokens</title>
      <link>https://community.databricks.com/t5/generative-ai/ai-query-and-cached-tokens/m-p/153984#M1743</link>
      <description>&lt;P&gt;Is ai_query actually able to use OpenAI's cached tokens? I was not able to prove it. The response object from ai_query does not contain the raw response, and when I re-run an identical request via OpenAI SDK (identical model, settings etc.) and examine the response, cached_tokens = 0, which indicates that caching does not work in this setup, for whatever reason.&lt;/P&gt;</description>
      <pubDate>Thu, 09 Apr 2026 19:30:11 GMT</pubDate>
      <guid>https://community.databricks.com/t5/generative-ai/ai-query-and-cached-tokens/m-p/153984#M1743</guid>
      <dc:creator>samuel86</dc:creator>
      <dc:date>2026-04-09T19:30:11Z</dc:date>
    </item>
  </channel>
</rss>

