<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" xmlns:taxo="http://purl.org/rss/1.0/modules/taxonomy/" version="2.0">
  <channel>
    <title>topic The model is always stuck in pending state, while the serving status says ready. in Machine Learning</title>
    <link>https://community.databricks.com/t5/machine-learning/the-model-is-always-stuck-in-pending-state-while-the-serving/m-p/17682#M962</link>
    <description>&lt;P&gt;I am serving a logistic regression model, and I keep getting this error. The issue tends to happen as more data is being modeled, but no matter how much I increase the serving cluster memory, it still errors. Here is the stack trace:&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;22/06/14 15:24:47 WARN TaskSetManager: Lost task 0.0 in stage 17.0 (TID 17) (10.24.7.205 executor driver): TaskResultLost (result lost from block manager)&lt;/P&gt;&lt;P&gt;22/06/14 15:24:47 ERROR TaskSetManager: Task 0 in stage 17.0 failed 1 times; aborting job&lt;/P&gt;&lt;P&gt;22/06/14 15:24:47 ERROR Instrumentation: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 17.0 failed 1 times, most recent failure: Lost task 0.0 in stage 17.0 (TID 17) (10.24.7.205 executor driver): TaskResultLost (result lost from block manager)&lt;/P&gt;&lt;P&gt;Driver stacktrace:&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2403)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2352)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2351)&lt;/P&gt;&lt;P&gt;	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)&lt;/P&gt;&lt;P&gt;	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)&lt;/P&gt;&lt;P&gt;	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2351)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1109)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1109)&lt;/P&gt;&lt;P&gt;	at scala.Option.foreach(Option.scala:407)&lt;/P&gt;&lt;P&gt;	at 
org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1109)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2591)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2533)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2522)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:898)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2214)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2235)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2254)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:476)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:429)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:48)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3715)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.Dataset.$anonfun$head$1(Dataset.scala:2728)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3706)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:775)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)&lt;/P&gt;&lt;P&gt;	at 
org.apache.spark.sql.Dataset.withAction(Dataset.scala:3704)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.Dataset.head(Dataset.scala:2728)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.Dataset.head(Dataset.scala:2735)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.classification.LogisticRegressionModel$LogisticRegressionModelReader.load(LogisticRegression.scala:1352)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.classification.LogisticRegressionModel$LogisticRegressionModelReader.load(LogisticRegression.scala:1324)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.Pipeline$SharedReadWrite$.$anonfun$load$5(Pipeline.scala:277)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.MLEvents.withLoadInstanceEvent(events.scala:160)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.MLEvents.withLoadInstanceEvent$(events.scala:155)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.util.Instrumentation.withLoadInstanceEvent(Instrumentation.scala:42)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.Pipeline$SharedReadWrite$.$anonfun$load$4(Pipeline.scala:277)&lt;/P&gt;&lt;P&gt;	at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)&lt;/P&gt;&lt;P&gt;	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)&lt;/P&gt;&lt;P&gt;	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)&lt;/P&gt;&lt;P&gt;	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)&lt;/P&gt;&lt;P&gt;	at scala.collection.TraversableLike.map(TraversableLike.scala:286)&lt;/P&gt;&lt;P&gt;	at scala.collection.TraversableLike.map$(TraversableLike.scala:279)&lt;/P&gt;&lt;P&gt;	at scala.collection.mutable.ArrayOps$ofRef.map(ArrayOps.scala:198)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.Pipeline$SharedReadWrite$.$anonfun$load$3(Pipeline.scala:274)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.util.Instrumentation$.$anonfun$instrumented$1(Instrumentation.scala:191)&lt;/P&gt;&lt;P&gt;	at scala.util.Try$.apply(Try.scala:213)&lt;/P&gt;&lt;P&gt;	at 
org.apache.spark.ml.util.Instrumentation$.instrumented(Instrumentation.scala:191)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.Pipeline$SharedReadWrite$.load(Pipeline.scala:268)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.PipelineModel$PipelineModelReader.$anonfun$load$7(Pipeline.scala:356)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.MLEvents.withLoadInstanceEvent(events.scala:160)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.MLEvents.withLoadInstanceEvent$(events.scala:155)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.util.Instrumentation.withLoadInstanceEvent(Instrumentation.scala:42)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.PipelineModel$PipelineModelReader.$anonfun$load$6(Pipeline.scala:355)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.util.Instrumentation$.$anonfun$instrumented$1(Instrumentation.scala:191)&lt;/P&gt;&lt;P&gt;	at scala.util.Try$.apply(Try.scala:213)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.util.Instrumentation$.instrumented(Instrumentation.scala:191)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.PipelineModel$PipelineModelReader.load(Pipeline.scala:355)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.PipelineModel$PipelineModelReader.load(Pipeline.scala:349)&lt;/P&gt;&lt;P&gt;	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)&lt;/P&gt;&lt;P&gt;	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)&lt;/P&gt;&lt;P&gt;	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)&lt;/P&gt;&lt;P&gt;	at java.lang.reflect.Method.invoke(Method.java:498)&lt;/P&gt;&lt;P&gt;	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)&lt;/P&gt;&lt;P&gt;	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)&lt;/P&gt;&lt;P&gt;	at py4j.Gateway.invoke(Gateway.java:282)&lt;/P&gt;&lt;P&gt;	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)&lt;/P&gt;&lt;P&gt;	at py4j.commands.CallCommand.execute(CallCommand.java:79)&lt;/P&gt;&lt;P&gt;	at 
py4j.ClientServerConnection.waitForCommands(ClientServerConnection.java:182)&lt;/P&gt;&lt;P&gt;	at py4j.ClientServerConnection.run(ClientServerConnection.java:106)&lt;/P&gt;&lt;P&gt;	at java.lang.Thread.run(Thread.java:748)&lt;/P&gt;</description>
    <pubDate>Tue, 14 Jun 2022 15:26:33 GMT</pubDate>
    <dc:creator>haylee</dc:creator>
    <dc:date>2022-06-14T15:26:33Z</dc:date>
    <item>
      <title>The model is always stuck in pending state, while the serving status says ready.</title>
      <link>https://community.databricks.com/t5/machine-learning/the-model-is-always-stuck-in-pending-state-while-the-serving/m-p/17682#M962</link>
      <description>&lt;P&gt;I am serving a logistic regression model, and I keep getting this error. The issue tends to happen as more data is being modeled, but no matter how much I increase the serving cluster memory, it still errors. Here is the stack trace:&lt;/P&gt;&lt;P&gt;&lt;/P&gt;&lt;P&gt;22/06/14 15:24:47 WARN TaskSetManager: Lost task 0.0 in stage 17.0 (TID 17) (10.24.7.205 executor driver): TaskResultLost (result lost from block manager)&lt;/P&gt;&lt;P&gt;22/06/14 15:24:47 ERROR TaskSetManager: Task 0 in stage 17.0 failed 1 times; aborting job&lt;/P&gt;&lt;P&gt;22/06/14 15:24:47 ERROR Instrumentation: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 17.0 failed 1 times, most recent failure: Lost task 0.0 in stage 17.0 (TID 17) (10.24.7.205 executor driver): TaskResultLost (result lost from block manager)&lt;/P&gt;&lt;P&gt;Driver stacktrace:&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2403)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2352)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2351)&lt;/P&gt;&lt;P&gt;	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)&lt;/P&gt;&lt;P&gt;	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)&lt;/P&gt;&lt;P&gt;	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2351)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1109)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1109)&lt;/P&gt;&lt;P&gt;	at scala.Option.foreach(Option.scala:407)&lt;/P&gt;&lt;P&gt;	at 
org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1109)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2591)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2533)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2522)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:898)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2214)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2235)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2254)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:476)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:429)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:48)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3715)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.Dataset.$anonfun$head$1(Dataset.scala:2728)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3706)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:103)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:163)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:90)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:775)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)&lt;/P&gt;&lt;P&gt;	at 
org.apache.spark.sql.Dataset.withAction(Dataset.scala:3704)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.Dataset.head(Dataset.scala:2728)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.sql.Dataset.head(Dataset.scala:2735)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.classification.LogisticRegressionModel$LogisticRegressionModelReader.load(LogisticRegression.scala:1352)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.classification.LogisticRegressionModel$LogisticRegressionModelReader.load(LogisticRegression.scala:1324)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.Pipeline$SharedReadWrite$.$anonfun$load$5(Pipeline.scala:277)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.MLEvents.withLoadInstanceEvent(events.scala:160)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.MLEvents.withLoadInstanceEvent$(events.scala:155)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.util.Instrumentation.withLoadInstanceEvent(Instrumentation.scala:42)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.Pipeline$SharedReadWrite$.$anonfun$load$4(Pipeline.scala:277)&lt;/P&gt;&lt;P&gt;	at scala.collection.TraversableLike.$anonfun$map$1(TraversableLike.scala:286)&lt;/P&gt;&lt;P&gt;	at scala.collection.IndexedSeqOptimized.foreach(IndexedSeqOptimized.scala:36)&lt;/P&gt;&lt;P&gt;	at scala.collection.IndexedSeqOptimized.foreach$(IndexedSeqOptimized.scala:33)&lt;/P&gt;&lt;P&gt;	at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:198)&lt;/P&gt;&lt;P&gt;	at scala.collection.TraversableLike.map(TraversableLike.scala:286)&lt;/P&gt;&lt;P&gt;	at scala.collection.TraversableLike.map$(TraversableLike.scala:279)&lt;/P&gt;&lt;P&gt;	at scala.collection.mutable.ArrayOps$ofRef.map(ArrayOps.scala:198)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.Pipeline$SharedReadWrite$.$anonfun$load$3(Pipeline.scala:274)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.util.Instrumentation$.$anonfun$instrumented$1(Instrumentation.scala:191)&lt;/P&gt;&lt;P&gt;	at scala.util.Try$.apply(Try.scala:213)&lt;/P&gt;&lt;P&gt;	at 
org.apache.spark.ml.util.Instrumentation$.instrumented(Instrumentation.scala:191)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.Pipeline$SharedReadWrite$.load(Pipeline.scala:268)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.PipelineModel$PipelineModelReader.$anonfun$load$7(Pipeline.scala:356)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.MLEvents.withLoadInstanceEvent(events.scala:160)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.MLEvents.withLoadInstanceEvent$(events.scala:155)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.util.Instrumentation.withLoadInstanceEvent(Instrumentation.scala:42)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.PipelineModel$PipelineModelReader.$anonfun$load$6(Pipeline.scala:355)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.util.Instrumentation$.$anonfun$instrumented$1(Instrumentation.scala:191)&lt;/P&gt;&lt;P&gt;	at scala.util.Try$.apply(Try.scala:213)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.util.Instrumentation$.instrumented(Instrumentation.scala:191)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.PipelineModel$PipelineModelReader.load(Pipeline.scala:355)&lt;/P&gt;&lt;P&gt;	at org.apache.spark.ml.PipelineModel$PipelineModelReader.load(Pipeline.scala:349)&lt;/P&gt;&lt;P&gt;	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)&lt;/P&gt;&lt;P&gt;	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)&lt;/P&gt;&lt;P&gt;	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)&lt;/P&gt;&lt;P&gt;	at java.lang.reflect.Method.invoke(Method.java:498)&lt;/P&gt;&lt;P&gt;	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)&lt;/P&gt;&lt;P&gt;	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)&lt;/P&gt;&lt;P&gt;	at py4j.Gateway.invoke(Gateway.java:282)&lt;/P&gt;&lt;P&gt;	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)&lt;/P&gt;&lt;P&gt;	at py4j.commands.CallCommand.execute(CallCommand.java:79)&lt;/P&gt;&lt;P&gt;	at 
py4j.ClientServerConnection.waitForCommands(ClientServerConnection.java:182)&lt;/P&gt;&lt;P&gt;	at py4j.ClientServerConnection.run(ClientServerConnection.java:106)&lt;/P&gt;&lt;P&gt;	at java.lang.Thread.run(Thread.java:748)&lt;/P&gt;</description>
      <pubDate>Tue, 14 Jun 2022 15:26:33 GMT</pubDate>
      <guid>https://community.databricks.com/t5/machine-learning/the-model-is-always-stuck-in-pending-state-while-the-serving/m-p/17682#M962</guid>
      <dc:creator>haylee</dc:creator>
      <dc:date>2022-06-14T15:26:33Z</dc:date>
    </item>
  </channel>
</rss>

