<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic Re: MLflow Model Serving latency expectations in Data Engineering</title>
    <link>https://community.databricks.com/t5/data-engineering/mlflow-model-serving-latency-expectations/m-p/18714#M12453</link>
    <description>&lt;P&gt;What are your throughput requirements in addition to latency? Currently this is in private preview and Databricks recommends this only for low throughput and non-critical applications. However, as it moves towards GA, this would change. Please get in touch with the Databricks accounts team.&lt;/P&gt;</description>
    <pubDate>Fri, 25 Jun 2021 21:47:58 GMT</pubDate>
    <dc:creator>sajith_appukutt</dc:creator>
    <dc:date>2021-06-25T21:47:58Z</dc:date>
    <item>
      <title>MLflow Model Serving latency expectations</title>
      <link>https://community.databricks.com/t5/data-engineering/mlflow-model-serving-latency-expectations/m-p/18713#M12452</link>
      <description>&lt;P&gt;What kind of latency should I expect when using the built in model serving capability in MLflow. Evaluating whether it would be a good fit for our use case&lt;/P&gt;</description>
      <pubDate>Fri, 25 Jun 2021 20:50:42 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/mlflow-model-serving-latency-expectations/m-p/18713#M12452</guid>
      <dc:creator>User16826992666</dc:creator>
      <dc:date>2021-06-25T20:50:42Z</dc:date>
    </item>
    <item>
      <title>Re: MLflow Model Serving latency expectations</title>
      <link>https://community.databricks.com/t5/data-engineering/mlflow-model-serving-latency-expectations/m-p/18714#M12453</link>
      <description>&lt;P&gt;What are your throughput requirements in addition to latency? Currently this is in private preview and Databricks recommends this only for low throughput and non-critical applications. However, as it moves towards GA, this would change. Please get in touch with the Databricks accounts team.&lt;/P&gt;</description>
      <pubDate>Fri, 25 Jun 2021 21:47:58 GMT</pubDate>
      <guid>https://community.databricks.com/t5/data-engineering/mlflow-model-serving-latency-expectations/m-p/18714#M12453</guid>
      <dc:creator>sajith_appukutt</dc:creator>
      <dc:date>2021-06-25T21:47:58Z</dc:date>
    </item>
  </channel>
</rss>

