Hi @kamal_sharma2
The issue you're encountering is a mismatch between the model flavor used for logging and the one used for loading.
Calling mlflow.sklearn.log_model() on a Spark ML PipelineModel records the artifact under the scikit-learn flavor even though it is a Spark ML model, so MLflow later tries to deserialize it as a scikit-learn object and the load fails.
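To see the mismatch concretely, here is a minimal sketch (the run ID and artifact path are placeholders): the loader has to come from the same flavor the model was logged with.
import mlflow.sklearn
import mlflow.spark

model_uri = "runs:/<run_id>/model"  # placeholder for your run and artifact path

# Loading through the sklearn flavor tries to deserialize the Spark ML
# artifact as a scikit-learn object and raises an exception:
# mlflow.sklearn.load_model(model_uri)

# The matching flavor loader works:
# loaded = mlflow.spark.load_model(model_uri)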
Solution: Re-log the Model with the Correct Flavor
First, determine what type of model you actually have:
from pyspark.ml import PipelineModel
import mlflow
import mlflow.spark
# Load your original model
model_path = "<volumePath>/sparkML_pipeline2022_2_0.model"
pipeline_model = PipelineModel.load(model_path)
# Check the model type
print(f"Model type: {type(pipeline_model)}")
print(f"Model stages: {[type(stage).__name__ for stage in pipeline_model.stages]}")
# Log it correctly as a Spark model
with mlflow.start_run():
    try:
        # This should work for Spark ML models
        mlflow.spark.log_model(pipeline_model, "spark_pipeline_model")
        print("Successfully logged as Spark model")
    except Exception as e:
        print(f"Error logging as Spark model: {e}")
        # If it fails, the model might have compatibility issues
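Once that succeeds, verify the round trip by loading the model back with the matching flavor (a sketch; the run ID below is a placeholder):
import mlflow.spark

model_uri = "runs:/<run_id>/spark_pipeline_model"  # use the run ID from above
loaded_model = mlflow.spark.load_model(model_uri)

# The loaded model behaves like the original pipeline:
# predictions = loaded_model.transform(spark_df)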
If the above fails, try this alternative approach:
# Alternative: wrap the Spark ML pipeline in a pyfunc model
import mlflow.pyfunc
from pyspark.sql import SparkSession

class SparkModelWrapper(mlflow.pyfunc.PythonModel):
    def load_context(self, context):
        # Spark ML models can't be pickled, so load the pipeline from the
        # logged artifact instead of storing it on the wrapper instance
        from pyspark.ml import PipelineModel
        self.spark_model = PipelineModel.load(context.artifacts["model_path"])

    def predict(self, context, model_input):
        if hasattr(model_input, 'toPandas'):
            # Already a Spark DataFrame
            return self.spark_model.transform(model_input)
        # Convert pandas input to a Spark DataFrame, transform, convert back
        spark = SparkSession.builder.getOrCreate()
        spark_df = spark.createDataFrame(model_input)
        result = self.spark_model.transform(spark_df)
        return result.toPandas()

# Log the wrapped model; the pipeline travels as an artifact
with mlflow.start_run():
    mlflow.pyfunc.log_model(
        "spark_pipeline_model",
        python_model=SparkModelWrapper(),
        artifacts={"model_path": model_path}
    )
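After logging, the wrapped model loads through the generic pyfunc interface and accepts pandas input (the run ID is again a placeholder):
import mlflow.pyfunc

model_uri = "runs:/<run_id>/spark_pipeline_model"
loaded = mlflow.pyfunc.load_model(model_uri)

# predict() takes a pandas DataFrame; the wrapper converts it to a Spark
# DataFrame, runs the pipeline, and returns the result as pandas
# predictions = loaded.predict(pandas_df)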
LR