Text Grade Level Metric

pydantic model ibm_watsonx_gov.metrics.text_grade_level.text_grade_level_metric.TextGradeLevelMetric

Bases: GenAIMetric

Defines the Text Grade Level metric class.

The Text Grade Level metric measures the approximate US reading grade level of a text. It is computed using the flesch_kincaid_grade method. Its possible values typically range from 0 to 12+.

  • Negative scores are rare and only occur with artificially simple texts.

  • No strict upper limit—some highly complex texts can score 30+, but these are extremely hard to read.

Examples

  1. Create Text Grade Level metric with default parameters and compute using metrics evaluator.
    metric = TextGradeLevelMetric()
    result = MetricsEvaluator().evaluate(data={"generated_text": "..."},
                                        metrics=[metric])
    
  2. Create Text Grade Level metric with a custom threshold.
    threshold = MetricThreshold(type="lower_limit", value=6)
    metric = TextGradeLevelMetric(thresholds=[threshold])
    

Show JSON schema
{
   "title": "TextGradeLevelMetric",
   "description": "Defines the Text Grade Level metric class.\n\nThe Text Grade Level metric measures the approximate reading US grade level of a text.\nIt is computed using the flesch_kincaid_grade method.\nIts possible values typically range from 0 to 12+\n    - Negative scores are rare and only occur with artificially simple texts.\n    - No strict upper limit\u2014some highly complex texts can score 30+, but these are extremely hard to read.\n\nExamples:\n    1. Create Text Grade Level metric with default parameters and compute using metrics evaluator.\n        .. code-block:: python\n\n            metric = TextGradeLevelMetric()\n            result = MetricsEvaluator().evaluate(data={\"generated_text\": \"...\"}, \n                                                metrics=[metric])\n\n    2. Create Text Grade Level metric with a custom threshold.\n        .. code-block:: python\n\n            threshold  = MetricThreshold(type=\"lower_limit\", value=6)\n            metric = TextGradeLevelMetric(thresholds=[threshold])",
   "type": "object",
   "properties": {
      "name": {
         "const": "text_grade_level",
         "default": "text_grade_level",
         "description": "The text grade level metric name.",
         "title": "name",
         "type": "string"
      },
      "thresholds": {
         "default": [
            {
               "type": "lower_limit",
               "value": 6.0
            }
         ],
         "description": "The metric thresholds.",
         "items": {
            "$ref": "#/$defs/MetricThreshold"
         },
         "title": "Thresholds",
         "type": "array"
      },
      "tasks": {
         "default": [
            "question_answering",
            "classification",
            "summarization",
            "generation",
            "extraction",
            "retrieval_augmented_generation"
         ],
         "description": "The list of supported tasks.",
         "items": {
            "$ref": "#/$defs/TaskType"
         },
         "title": "Tasks",
         "type": "array"
      },
      "group": {
         "$ref": "#/$defs/MetricGroup",
         "default": "readability",
         "description": "The metric group.",
         "title": "Group"
      },
      "is_reference_free": {
         "default": true,
         "description": "Decides whether this metric needs a reference for computation",
         "title": "Is Reference Free",
         "type": "boolean"
      },
      "method": {
         "const": "flesch_kincaid_grade",
         "default": "flesch_kincaid_grade",
         "description": "The method used to compute text grade level metric.",
         "title": "Method",
         "type": "string"
      },
      "metric_dependencies": {
         "default": [],
         "description": "Metrics that needs to be evaluated first",
         "items": {
            "$ref": "#/$defs/GenAIMetric"
         },
         "title": "Metric Dependencies",
         "type": "array"
      }
   },
   "$defs": {
      "GenAIMetric": {
         "description": "Defines the Generative AI metric interface",
         "properties": {
            "name": {
               "description": "The name of the metric",
               "title": "Metric Name",
               "type": "string"
            },
            "thresholds": {
               "default": [],
               "description": "The list of thresholds",
               "items": {
                  "$ref": "#/$defs/MetricThreshold"
               },
               "title": "Thresholds",
               "type": "array"
            },
            "tasks": {
               "description": "The task types this metric is associated with.",
               "items": {
                  "$ref": "#/$defs/TaskType"
               },
               "title": "Tasks",
               "type": "array"
            },
            "group": {
               "anyOf": [
                  {
                     "$ref": "#/$defs/MetricGroup"
                  },
                  {
                     "type": "null"
                  }
               ],
               "default": null,
               "description": "The metric group this metric belongs to."
            },
            "is_reference_free": {
               "default": true,
               "description": "Decides whether this metric needs a reference for computation",
               "title": "Is Reference Free",
               "type": "boolean"
            },
            "method": {
               "anyOf": [
                  {
                     "type": "string"
                  },
                  {
                     "type": "null"
                  }
               ],
               "default": null,
               "description": "The method used to compute the metric.",
               "title": "Method"
            },
            "metric_dependencies": {
               "default": [],
               "description": "Metrics that needs to be evaluated first",
               "items": {
                  "$ref": "#/$defs/GenAIMetric"
               },
               "title": "Metric Dependencies",
               "type": "array"
            }
         },
         "required": [
            "name",
            "tasks"
         ],
         "title": "GenAIMetric",
         "type": "object"
      },
      "MetricGroup": {
         "enum": [
            "retrieval_quality",
            "answer_quality",
            "content_safety",
            "performance",
            "usage",
            "tool_call_quality",
            "readability"
         ],
         "title": "MetricGroup",
         "type": "string"
      },
      "MetricThreshold": {
         "description": "The class that defines the threshold for a metric.",
         "properties": {
            "type": {
               "description": "Threshold type. One of 'lower_limit', 'upper_limit'",
               "enum": [
                  "lower_limit",
                  "upper_limit"
               ],
               "title": "Type",
               "type": "string"
            },
            "value": {
               "default": 0,
               "description": "The value of metric threshold",
               "title": "Threshold value",
               "type": "number"
            }
         },
         "required": [
            "type"
         ],
         "title": "MetricThreshold",
         "type": "object"
      },
      "TaskType": {
         "description": "Supported task types for generative AI models",
         "enum": [
            "question_answering",
            "classification",
            "summarization",
            "generation",
            "extraction",
            "retrieval_augmented_generation"
         ],
         "title": "TaskType",
         "type": "string"
      }
   }
}

Fields:
field group: Annotated[MetricGroup, FieldInfo(annotation=NoneType, required=False, default=MetricGroup.READABILITY, title='Group', description='The metric group.', frozen=True)] = MetricGroup.READABILITY

The metric group.

field method: Annotated[Literal['flesch_kincaid_grade'], FieldInfo(annotation=NoneType, required=False, default='flesch_kincaid_grade', title='Method', description='The method used to compute text grade level metric.')] = 'flesch_kincaid_grade'

The method used to compute text grade level metric.

field name: Annotated[Literal['text_grade_level'], FieldInfo(annotation=NoneType, required=False, default='text_grade_level', title='name', description='The text grade level metric name.', frozen=True)] = 'text_grade_level'

The text grade level metric name.

field tasks: Annotated[list[TaskType], FieldInfo(annotation=NoneType, required=False, default=['question_answering', 'classification', 'summarization', 'generation', 'extraction', 'retrieval_augmented_generation'], title='Tasks', description='The list of supported tasks.', frozen=True)] = ['question_answering', 'classification', 'summarization', 'generation', 'extraction', 'retrieval_augmented_generation']

The list of supported tasks.

field thresholds: Annotated[list[MetricThreshold], FieldInfo(annotation=NoneType, required=False, default=[MetricThreshold(type='lower_limit', value=6.0)], title='Thresholds', description='The metric thresholds.')] = [MetricThreshold(type='lower_limit', value=6.0)]

The metric thresholds.

evaluate(data: DataFrame, configuration: GenAIConfiguration | AgenticAIConfiguration, **kwargs) → list[AggregateMetricResult]
model_post_init(context: Any, /) → None

We need to both initialize private attributes and call the user-defined model_post_init method.

pydantic model ibm_watsonx_gov.metrics.text_grade_level.text_grade_level_metric.TextGradeLevelResult

Bases: RecordMetricResult

Show JSON schema
{
   "title": "TextGradeLevelResult",
   "type": "object",
   "properties": {
      "name": {
         "default": "text_grade_level",
         "title": "Name",
         "type": "string"
      },
      "method": {
         "default": "flesch_kincaid_grade",
         "title": "Method",
         "type": "string"
      },
      "provider": {
         "default": "textstat",
         "title": "Provider",
         "type": "string"
      },
      "value": {
         "anyOf": [
            {
               "type": "number"
            },
            {
               "type": "string"
            },
            {
               "type": "boolean"
            },
            {
               "type": "null"
            }
         ],
         "description": "The metric value.",
         "title": "Value"
      },
      "errors": {
         "anyOf": [
            {
               "items": {
                  "$ref": "#/$defs/Error"
               },
               "type": "array"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "description": "The list of error messages",
         "title": "Errors"
      },
      "additional_info": {
         "anyOf": [
            {
               "type": "object"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "description": "The additional information about the metric result.",
         "title": "Additional Info"
      },
      "group": {
         "$ref": "#/$defs/MetricGroup",
         "default": "readability"
      },
      "thresholds": {
         "default": [],
         "description": "The metric thresholds",
         "items": {
            "$ref": "#/$defs/MetricThreshold"
         },
         "title": "Thresholds",
         "type": "array"
      },
      "record_id": {
         "description": "The record identifier.",
         "examples": [
            "record1"
         ],
         "title": "Record Id",
         "type": "string"
      },
      "record_timestamp": {
         "anyOf": [
            {
               "type": "string"
            },
            {
               "type": "null"
            }
         ],
         "default": null,
         "description": "The record timestamp.",
         "examples": [
            "2025-01-01T00:00:00.000000Z"
         ],
         "title": "Record Timestamp"
      }
   },
   "$defs": {
      "Error": {
         "properties": {
            "code": {
               "description": "The error code",
               "title": "Code",
               "type": "string"
            },
            "message_en": {
               "description": "The error message in English.",
               "title": "Message En",
               "type": "string"
            },
            "parameters": {
               "default": [],
               "description": "The list of parameters to construct the message in a different locale.",
               "items": {},
               "title": "Parameters",
               "type": "array"
            }
         },
         "required": [
            "code",
            "message_en"
         ],
         "title": "Error",
         "type": "object"
      },
      "MetricGroup": {
         "enum": [
            "retrieval_quality",
            "answer_quality",
            "content_safety",
            "performance",
            "usage",
            "tool_call_quality",
            "readability"
         ],
         "title": "MetricGroup",
         "type": "string"
      },
      "MetricThreshold": {
         "description": "The class that defines the threshold for a metric.",
         "properties": {
            "type": {
               "description": "Threshold type. One of 'lower_limit', 'upper_limit'",
               "enum": [
                  "lower_limit",
                  "upper_limit"
               ],
               "title": "Type",
               "type": "string"
            },
            "value": {
               "default": 0,
               "description": "The value of metric threshold",
               "title": "Threshold value",
               "type": "number"
            }
         },
         "required": [
            "type"
         ],
         "title": "MetricThreshold",
         "type": "object"
      }
   },
   "required": [
      "value",
      "record_id"
   ]
}

Config:
  • arbitrary_types_allowed: bool = True

  • use_enum_values: bool = True

Fields:
field group: MetricGroup = MetricGroup.READABILITY
field method: str = 'flesch_kincaid_grade'
field name: str = 'text_grade_level'
field provider: str = 'textstat'