# Guardrails

Example of a custom MLServer model that acts as a guardrail by always returning a fixed refusal message.
from mlserver.model import MLModel
from mlserver.codecs import StringCodec
from mlserver.types import InferenceRequest, InferenceResponse
class MessageModel(MLModel):
    """Guardrail model that ignores the request and returns a fixed refusal.

    Every prediction produces the same assistant message
    ("I cannot talk about this.") encoded as three string outputs:
    ``role``, ``content`` and ``type``.
    """

    async def load(self) -> bool:
        # No weights or resources to initialise; the model is ready at once.
        self.ready = True
        return self.ready

    async def predict(self, payload: InferenceRequest) -> InferenceResponse:
        """Return the canned refusal message, regardless of *payload*."""
        # Fixed (output-name, value) pairs for the guardrail response.
        fields = (
            ("role", "assistant"),
            ("content", "I cannot talk about this."),
            ("type", "text"),
        )
        return InferenceResponse(
            model_name=self.settings.name,
            model_version=self.settings.version,
            outputs=[
                StringCodec.encode_output(name=key, payload=[value])
                for key, value in fields
            ],
        )
Was this helpful?