@teamdandelion
Created August 20, 2025 18:43
import json
import sys
from typing import Any


def load_results(filename: str = "openai_model_test_results.json") -> list[dict[str, Any]]:
    """Load test results from JSON file."""
    try:
        with open(filename) as f:
            data = json.load(f)
        return data["results"]
    except FileNotFoundError:
        print(f"Results file '{filename}' not found. Run test_openai_support.py first.")
        sys.exit(1)
    except json.JSONDecodeError:
        print(f"Invalid JSON in '{filename}'")
        sys.exit(1)


def analyze_results(results: list[dict[str, Any]]) -> None:
    """Analyze results and print models without support."""
    # Filter out models that weren't found
    existing_models = [r for r in results if not r.get("not_found", False)]

    # Models without strict support
    no_strict = [r["model"] for r in existing_models if not r.get("supports_strict", False)]

    # Models without JSON support
    no_json = [r["model"] for r in existing_models if not r.get("supports_json", False)]

    print("OPENAI MODEL SUPPORT ANALYSIS")
    print("=" * 50)
    print(f"Total models tested: {len(results)}")
    print(f"Existing models: {len(existing_models)}")
    print(f"Models not found: {len(results) - len(existing_models)}")

    print(f"\nMODELS WITHOUT STRICT SUPPORT ({len(no_strict)}):")
    print("-" * 50)
    if no_strict:
        for model in sorted(no_strict):
            print(f" {model}")
    else:
        print(" All existing models support strict mode!")

    print(f"\nMODELS WITHOUT JSON SUPPORT ({len(no_json)}):")
    print("-" * 50)
    if no_json:
        for model in sorted(no_json):
            print(f" {model}")
    else:
        print(" All existing models support JSON mode!")

    # Summary stats
    has_strict = len(existing_models) - len(no_strict)
    has_json = len(existing_models) - len(no_json)
    print("\nSUMMARY:")
    print(f"Models with strict support: {has_strict}/{len(existing_models)} ({has_strict/len(existing_models)*100:.1f}%)")
    print(f"Models with JSON support: {has_json}/{len(existing_models)} ({has_json/len(existing_models)*100:.1f}%)")


def export_lists(results: list[dict[str, Any]], format_type: str = "python") -> None:
    """Export the lists in different formats for easy copying."""
    existing_models = [r for r in results if not r.get("not_found", False)]
    no_strict = [r["model"] for r in existing_models if not r.get("supports_strict", False)]
    no_json = [r["model"] for r in existing_models if not r.get("supports_json", False)]

    print(f"\nEXPORT ({format_type.upper()}):")
    print("=" * 50)

    if format_type == "python":
        print("NO_STRICT_SUPPORT = [")
        for model in sorted(no_strict):
            print(f' "{model}",')
        print("]\n")

        print("NO_JSON_SUPPORT = [")
        for model in sorted(no_json):
            print(f' "{model}",')
        print("]")

    elif format_type == "csv":
        print("Models without strict support:")
        print(",".join(sorted(no_strict)))
        print("\nModels without JSON support:")
        print(",".join(sorted(no_json)))


def main():
    """Main analysis function."""
    if len(sys.argv) > 1:
        filename = sys.argv[1]
    else:
        filename = "openai_model_test_results.json"

    results = load_results(filename)
    analyze_results(results)

    print("\n" + "=" * 50)
    export_lists(results, "python")

    print(f"\nTo export as CSV, run: python {sys.argv[0]} {filename} csv")


if __name__ == "__main__":
    if len(sys.argv) > 2 and sys.argv[2] == "csv":
        results = load_results(sys.argv[1] if len(sys.argv) > 1 else "openai_model_test_results.json")
        export_lists(results, "csv")
    else:
        main()
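The analysis script above reads a results file written by test_openai_support.py (the harness at the end of this gist). Run it as "python <script> [results-file]" for the report plus Python-list export, or append "csv" as a second argument for CSV export, as the script's own usage message says. The captured results from the 2025-08-20 run follow.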
{
"timestamp": "2025-08-20T11:34:42.653637",
"total_models_tested": 62,
"results": [
{
"model": "gpt-5",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\"answer\":2}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "gpt-5-mini",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "gpt-5-nano",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\"answer\": 2}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "gpt-5-2025-08-07",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\"answer\":2}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "gpt-5-mini-2025-08-07",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\"answer\":2}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "gpt-5-nano-2025-08-07",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "gpt-5-chat-latest",
"not_found": false,
"supports_strict": false,
"supports_json": true,
"strict_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\", 'type': 'invalid_request_error', 'param': None, 'code': None}}",
"json_error_message": null,
"strict_response": "",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": false,
"json_passes_validation": true
},
{
"model": "gpt-4.1",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "gpt-4.1-mini",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "gpt-4.1-nano",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\"answer\": 2}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "gpt-4.1-2025-04-14",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "gpt-4.1-mini-2025-04-14",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "gpt-4.1-nano-2025-04-14",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "o4-mini",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\"answer\":2}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "o4-mini-2025-04-16",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\"answer\":2}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "o3",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "o3-2025-04-16",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\"answer\":2}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "o3-mini",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\": 2}",
"json_response": "{\"answer\": 2}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "o3-mini-2025-01-31",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\": 2}",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "o1",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "o1-2024-12-17",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "o1-preview",
"not_found": true,
"supports_strict": false,
"supports_json": false,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "o1-preview-2024-09-12",
"not_found": true,
"supports_strict": false,
"supports_json": false,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "o1-mini",
"not_found": false,
"supports_strict": false,
"supports_json": false,
"strict_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\", 'type': 'invalid_request_error', 'param': None, 'code': None}}",
"json_error_message": "Error code: 400 - {'error': {'message': \"Unsupported value: 'messages[0].role' does not support 'system' with this model.\", 'type': 'invalid_request_error', 'param': 'messages[0].role', 'code': 'unsupported_value'}}",
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "o1-mini-2024-09-12",
"not_found": false,
"supports_strict": false,
"supports_json": false,
"strict_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\", 'type': 'invalid_request_error', 'param': None, 'code': None}}",
"json_error_message": "Error code: 400 - {'error': {'message': \"Unsupported value: 'messages[0].role' does not support 'system' with this model.\", 'type': 'invalid_request_error', 'param': 'messages[0].role', 'code': 'unsupported_value'}}",
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-4o",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "gpt-4o-2024-11-20",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "gpt-4o-2024-08-06",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "gpt-4o-2024-05-13",
"not_found": false,
"supports_strict": false,
"supports_json": true,
"strict_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\", 'type': 'invalid_request_error', 'param': None, 'code': None}}",
"json_error_message": null,
"strict_response": "",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": false,
"json_passes_validation": true
},
{
"model": "gpt-4o-audio-preview",
"not_found": false,
"supports_strict": false,
"supports_json": false,
"strict_error_message": "Error code: 400 - {'error': {'message': 'This model requires that either input content or output modality contain audio.', 'type': 'invalid_request_error', 'param': 'model', 'code': 'invalid_value'}}",
"json_error_message": "Error code: 400 - {'error': {'message': 'This model requires that either input content or output modality contain audio.', 'type': 'invalid_request_error', 'param': 'model', 'code': 'invalid_value'}}",
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-4o-audio-preview-2024-10-01",
"not_found": false,
"supports_strict": false,
"supports_json": false,
"strict_error_message": "Error code: 400 - {'error': {'message': 'This model requires that either input content or output modality contain audio.', 'type': 'invalid_request_error', 'param': 'model', 'code': 'invalid_value'}}",
"json_error_message": "Error code: 400 - {'error': {'message': 'This model requires that either input content or output modality contain audio.', 'type': 'invalid_request_error', 'param': 'model', 'code': 'invalid_value'}}",
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-4o-audio-preview-2024-12-17",
"not_found": false,
"supports_strict": false,
"supports_json": false,
"strict_error_message": "Error code: 400 - {'error': {'message': 'This model requires that either input content or output modality contain audio.', 'type': 'invalid_request_error', 'param': 'model', 'code': 'invalid_value'}}",
"json_error_message": "Error code: 400 - {'error': {'message': 'This model requires that either input content or output modality contain audio.', 'type': 'invalid_request_error', 'param': 'model', 'code': 'invalid_value'}}",
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-4o-audio-preview-2025-06-03",
"not_found": false,
"supports_strict": false,
"supports_json": false,
"strict_error_message": "Error code: 400 - {'error': {'message': 'This model requires that either input content or output modality contain audio.', 'type': 'invalid_request_error', 'param': 'model', 'code': 'invalid_value'}}",
"json_error_message": "Error code: 400 - {'error': {'message': 'This model requires that either input content or output modality contain audio.', 'type': 'invalid_request_error', 'param': 'model', 'code': 'invalid_value'}}",
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-4o-mini-audio-preview",
"not_found": false,
"supports_strict": false,
"supports_json": false,
"strict_error_message": "Error code: 400 - {'error': {'message': 'This model requires that either input content or output modality contain audio.', 'type': 'invalid_request_error', 'param': 'model', 'code': 'invalid_value'}}",
"json_error_message": "Error code: 400 - {'error': {'message': 'This model requires that either input content or output modality contain audio.', 'type': 'invalid_request_error', 'param': 'model', 'code': 'invalid_value'}}",
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-4o-mini-audio-preview-2024-12-17",
"not_found": false,
"supports_strict": false,
"supports_json": false,
"strict_error_message": "Error code: 400 - {'error': {'message': 'This model requires that either input content or output modality contain audio.', 'type': 'invalid_request_error', 'param': 'model', 'code': 'invalid_value'}}",
"json_error_message": "Error code: 400 - {'error': {'message': 'This model requires that either input content or output modality contain audio.', 'type': 'invalid_request_error', 'param': 'model', 'code': 'invalid_value'}}",
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-4o-search-preview",
"not_found": false,
"supports_strict": true,
"supports_json": false,
"strict_error_message": null,
"json_error_message": "Error code: 400 - {'error': {'message': \"Response format 'json_object' is not supported with web_search.\", 'type': 'invalid_request_error', 'param': 'response_format', 'code': None}}",
"strict_response": "{\"answer\":2} ",
"json_response": "",
"strict_passes_validation": true,
"json_passes_validation": false
},
{
"model": "gpt-4o-mini-search-preview",
"not_found": false,
"supports_strict": true,
"supports_json": false,
"strict_error_message": null,
"json_error_message": "Error code: 400 - {'error': {'message': \"Response format 'json_object' is not supported with web_search.\", 'type': 'invalid_request_error', 'param': 'response_format', 'code': None}}",
"strict_response": "{\"answer\":2} ",
"json_response": "",
"strict_passes_validation": true,
"json_passes_validation": false
},
{
"model": "gpt-4o-search-preview-2025-03-11",
"not_found": false,
"supports_strict": true,
"supports_json": false,
"strict_error_message": null,
"json_error_message": "Error code: 400 - {'error': {'message': \"Response format 'json_object' is not supported with web_search.\", 'type': 'invalid_request_error', 'param': 'response_format', 'code': None}}",
"strict_response": "{\"answer\":2} ",
"json_response": "",
"strict_passes_validation": true,
"json_passes_validation": false
},
{
"model": "gpt-4o-mini-search-preview-2025-03-11",
"not_found": false,
"supports_strict": true,
"supports_json": false,
"strict_error_message": null,
"json_error_message": "Error code: 400 - {'error': {'message': \"Response format 'json_object' is not supported with web_search.\", 'type': 'invalid_request_error', 'param': 'response_format', 'code': None}}",
"strict_response": "{\"answer\": 2} ",
"json_response": "",
"strict_passes_validation": true,
"json_passes_validation": false
},
{
"model": "chatgpt-4o-latest",
"not_found": false,
"supports_strict": false,
"supports_json": true,
"strict_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\", 'type': 'invalid_request_error', 'param': None, 'code': None}}",
"json_error_message": null,
"strict_response": "",
"json_response": "{\"answer\":2}",
"strict_passes_validation": false,
"json_passes_validation": true
},
{
"model": "codex-mini-latest",
"not_found": true,
"supports_strict": false,
"supports_json": false,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-4o-mini",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "gpt-4o-mini-2024-07-18",
"not_found": false,
"supports_strict": true,
"supports_json": true,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "{\"answer\":2}",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": true,
"json_passes_validation": true
},
{
"model": "gpt-4-turbo",
"not_found": false,
"supports_strict": false,
"supports_json": true,
"strict_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\", 'type': 'invalid_request_error', 'param': None, 'code': None}}",
"json_error_message": null,
"strict_response": "",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": false,
"json_passes_validation": true
},
{
"model": "gpt-4-turbo-2024-04-09",
"not_found": false,
"supports_strict": false,
"supports_json": true,
"strict_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\", 'type': 'invalid_request_error', 'param': None, 'code': None}}",
"json_error_message": null,
"strict_response": "",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": false,
"json_passes_validation": true
},
{
"model": "gpt-4-0125-preview",
"not_found": false,
"supports_strict": false,
"supports_json": true,
"strict_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\", 'type': 'invalid_request_error', 'param': None, 'code': None}}",
"json_error_message": null,
"strict_response": "",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": false,
"json_passes_validation": true
},
{
"model": "gpt-4-turbo-preview",
"not_found": false,
"supports_strict": false,
"supports_json": true,
"strict_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\", 'type': 'invalid_request_error', 'param': None, 'code': None}}",
"json_error_message": null,
"strict_response": "",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": false,
"json_passes_validation": true
},
{
"model": "gpt-4-1106-preview",
"not_found": false,
"supports_strict": false,
"supports_json": true,
"strict_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\", 'type': 'invalid_request_error', 'param': None, 'code': None}}",
"json_error_message": null,
"strict_response": "",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": false,
"json_passes_validation": true
},
{
"model": "gpt-4-vision-preview",
"not_found": true,
"supports_strict": false,
"supports_json": false,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-4",
"not_found": false,
"supports_strict": false,
"supports_json": false,
"strict_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\", 'type': 'invalid_request_error', 'param': None, 'code': None}}",
"json_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_object' is not supported with this model.\", 'type': 'invalid_request_error', 'param': 'response_format', 'code': None}}",
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-4-0314",
"not_found": true,
"supports_strict": false,
"supports_json": false,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-4-0613",
"not_found": false,
"supports_strict": false,
"supports_json": false,
"strict_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\", 'type': 'invalid_request_error', 'param': None, 'code': None}}",
"json_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_object' is not supported with this model.\", 'type': 'invalid_request_error', 'param': 'response_format', 'code': None}}",
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-4-32k",
"not_found": true,
"supports_strict": false,
"supports_json": false,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-4-32k-0314",
"not_found": true,
"supports_strict": false,
"supports_json": false,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-4-32k-0613",
"not_found": true,
"supports_strict": false,
"supports_json": false,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-3.5-turbo",
"not_found": false,
"supports_strict": false,
"supports_json": true,
"strict_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\", 'type': 'invalid_request_error', 'param': None, 'code': None}}",
"json_error_message": null,
"strict_response": "",
"json_response": "{\n \"properties\": {\n \"answer\": {\n \"title\": \"Answer\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"answer\"\n ],\n \"title\": \"JsonFormat\",\n \"type\": \"object\"\n}",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-3.5-turbo-16k",
"not_found": false,
"supports_strict": false,
"supports_json": true,
"strict_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\", 'type': 'invalid_request_error', 'param': None, 'code': None}}",
"json_error_message": null,
"strict_response": "",
"json_response": "{\n \"properties\": {\n \"answer\": {\n \"title\": \"Answer\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"answer\"\n ],\n \"title\": \"JsonFormat\",\n \"type\": \"object\"\n}",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-3.5-turbo-0301",
"not_found": true,
"supports_strict": false,
"supports_json": false,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-3.5-turbo-0613",
"not_found": true,
"supports_strict": false,
"supports_json": false,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-3.5-turbo-1106",
"not_found": false,
"supports_strict": false,
"supports_json": true,
"strict_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\", 'type': 'invalid_request_error', 'param': None, 'code': None}}",
"json_error_message": null,
"strict_response": "",
"json_response": "{\n \"answer\": 2\n}",
"strict_passes_validation": false,
"json_passes_validation": true
},
{
"model": "gpt-3.5-turbo-0125",
"not_found": false,
"supports_strict": false,
"supports_json": true,
"strict_error_message": "Error code: 400 - {'error': {'message': \"Invalid parameter: 'response_format' of type 'json_schema' is not supported with this model. Learn more about supported models at the Structured Outputs guide: https://platform.openai.com/docs/guides/structured-outputs\", 'type': 'invalid_request_error', 'param': None, 'code': None}}",
"json_error_message": null,
"strict_response": "",
"json_response": "{\n \"properties\": {\n \"answer\": {\n \"title\": \"Answer\",\n \"type\": \"integer\"\n }\n },\n \"required\": [\n \"answer\"\n ],\n \"title\": \"JsonFormat\",\n \"type\": \"object\"\n}",
"strict_passes_validation": false,
"json_passes_validation": false
},
{
"model": "gpt-3.5-turbo-16k-0613",
"not_found": true,
"supports_strict": false,
"supports_json": false,
"strict_error_message": null,
"json_error_message": null,
"strict_response": "",
"json_response": "",
"strict_passes_validation": false,
"json_passes_validation": false
}
]
}
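The harness that produced the results above, test_openai_support.py (the filename referenced in the analysis script's error message), follows.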
import json
from dataclasses import asdict, dataclass
from datetime import datetime

import openai
from dotenv import load_dotenv
from pydantic import BaseModel

from mirascope import llm

load_dotenv()

MODELS_TO_TEST = [
    "gpt-5",
    "gpt-5-mini",
    "gpt-5-nano",
    "gpt-5-2025-08-07",
    "gpt-5-mini-2025-08-07",
    "gpt-5-nano-2025-08-07",
    "gpt-5-chat-latest",
    "gpt-4.1",
    "gpt-4.1-mini",
    "gpt-4.1-nano",
    "gpt-4.1-2025-04-14",
    "gpt-4.1-mini-2025-04-14",
    "gpt-4.1-nano-2025-04-14",
    "o4-mini",
    "o4-mini-2025-04-16",
    "o3",
    "o3-2025-04-16",
    "o3-mini",
    "o3-mini-2025-01-31",
    "o1",
    "o1-2024-12-17",
    "o1-preview",
    "o1-preview-2024-09-12",
    "o1-mini",
    "o1-mini-2024-09-12",
    "gpt-4o",
    "gpt-4o-2024-11-20",
    "gpt-4o-2024-08-06",
    "gpt-4o-2024-05-13",
    "gpt-4o-audio-preview",
    "gpt-4o-audio-preview-2024-10-01",
    "gpt-4o-audio-preview-2024-12-17",
    "gpt-4o-audio-preview-2025-06-03",
    "gpt-4o-mini-audio-preview",
    "gpt-4o-mini-audio-preview-2024-12-17",
    "gpt-4o-search-preview",
    "gpt-4o-mini-search-preview",
    "gpt-4o-search-preview-2025-03-11",
    "gpt-4o-mini-search-preview-2025-03-11",
    "chatgpt-4o-latest",
    "codex-mini-latest",
    "gpt-4o-mini",
    "gpt-4o-mini-2024-07-18",
    "gpt-4-turbo",
    "gpt-4-turbo-2024-04-09",
    "gpt-4-0125-preview",
    "gpt-4-turbo-preview",
    "gpt-4-1106-preview",
    "gpt-4-vision-preview",
    "gpt-4",
    "gpt-4-0314",
    "gpt-4-0613",
    "gpt-4-32k",
    "gpt-4-32k-0314",
    "gpt-4-32k-0613",
    "gpt-3.5-turbo",
    "gpt-3.5-turbo-16k",
    "gpt-3.5-turbo-0301",
    "gpt-3.5-turbo-0613",
    "gpt-3.5-turbo-1106",
    "gpt-3.5-turbo-0125",
    "gpt-3.5-turbo-16k-0613",
]


@llm.format(mode="strict")
class StrictFormat(BaseModel):
    answer: int


@llm.format(mode="json")
class JsonFormat(BaseModel):
    answer: int


client = llm.clients.get_client(provider="openai")


@dataclass
class ModelReport:
    model: str
    not_found: bool = False
    supports_strict: bool = False
    supports_json: bool = False
    strict_error_message: str | None = None
    json_error_message: str | None = None
    strict_response: str = ""
    json_response: str = ""
    strict_passes_validation: bool = False
    json_passes_validation: bool = False


query = [llm.messages.user("What is 1+1")]
def test_model(model: str) -> ModelReport:
    result = ModelReport(model=model)

    # Test strict format support
    try:
        response = client.structured_call(
            model=model, messages=query, format=StrictFormat
        )
        result.supports_strict = True
        result.strict_response = response.pretty()
        try:
            response.format()
            result.strict_passes_validation = True
        except Exception:
            pass
    except openai.NotFoundError:
        result.not_found = True
        return result
    except openai.BadRequestError as e:
        result.strict_error_message = e.message
    except Exception as e:
        result.strict_error_message = str(e)

    # Test JSON format support (only if model exists)
    if not result.not_found:
        try:
            response = client.structured_call(
                model=model, messages=query, format=JsonFormat
            )
            result.supports_json = True
            result.json_response = response.pretty()
            try:
                response.format()
                result.json_passes_validation = True
            except Exception:
                pass
        except openai.BadRequestError as e:
            result.json_error_message = e.message
        except Exception as e:
            result.json_error_message = str(e)

    return result
def print_result(report: ModelReport):
    """Print test result to console in real-time."""
    status = "NOT FOUND" if report.not_found else ""
    if not report.not_found:
        strict_status = "✓" if report.supports_strict else "✗"
        json_status = "✓" if report.supports_json else "✗"
        status = f"Strict: {strict_status} | JSON: {json_status}"

    print(f"{report.model:40} | {status}")
    if report.strict_error_message:
        print(f" Strict error: {report.strict_error_message}")
    if report.json_error_message:
        print(f" JSON error: {report.json_error_message}")


def save_results_incrementally(results: list[ModelReport], filename: str = "openai_model_test_results.json"):
    """Save results to JSON file incrementally."""
    data = {
        "timestamp": datetime.now().isoformat(),
        "total_models_tested": len(results),
        "results": [asdict(report) for report in results]
    }
    with open(filename, 'w') as f:
        json.dump(data, f, indent=2)


def main():
    """Main test execution."""
    print("Testing OpenAI model support for structured outputs...")
    print("=" * 80)
    print(f"{'Model':40} | Status")
    print("-" * 80)

    results = []
    for i, model in enumerate(MODELS_TO_TEST, 1):
        print(f"[{i:2}/{len(MODELS_TO_TEST)}] Testing {model}...")
        try:
            report = test_model(model)
            results.append(report)
            print_result(report)

            # Save results incrementally every 5 models
            if i % 5 == 0:
                save_results_incrementally(results)
        except KeyboardInterrupt:
            print("\nTest interrupted by user. Saving partial results...")
            save_results_incrementally(results)
            break
        except Exception as e:
            print(f"Unexpected error testing {model}: {e}")
            continue

    # Final save
    save_results_incrementally(results)

    # Summary
    not_found = sum(1 for r in results if r.not_found)
    strict_support = sum(1 for r in results if r.supports_strict)
    json_support = sum(1 for r in results if r.supports_json)

    print("\n" + "=" * 80)
    print("SUMMARY:")
    print(f"Total models tested: {len(results)}")
    print(f"Models not found: {not_found}")
    print(f"Models with strict support: {strict_support}")
    print(f"Models with JSON support: {json_support}")
    print("Results saved to: openai_model_test_results.json")


if __name__ == "__main__":
    main()
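For reference, the two modes exercised above correspond to different response_format values in OpenAI's Chat Completions API: the strict-mode failures in the results mention a response_format of type json_schema (Structured Outputs), while the JSON-mode failures mention json_object. Below is a minimal sketch of the same probe written directly against the OpenAI Python SDK, without Mirascope; the model name, schema name, and prompts are illustrative assumptions, not taken from the gist.

# Sketch only: raw-SDK equivalents of the "strict" and "json" probes above.
# Model name, schema name, and prompts are illustrative assumptions.
from openai import OpenAI

client = OpenAI()

# "strict": Structured Outputs via a json_schema response_format.
strict = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "What is 1+1"}],
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "answer",
            "strict": True,
            "schema": {
                "type": "object",
                "properties": {"answer": {"type": "integer"}},
                "required": ["answer"],
                "additionalProperties": False,
            },
        },
    },
)

# "json": the older json_object mode; the prompt itself must ask for JSON.
json_mode = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[
        {"role": "system", "content": "Reply with a JSON object containing an integer field 'answer'."},
        {"role": "user", "content": "What is 1+1"},
    ],
    response_format={"type": "json_object"},
)

print(strict.choices[0].message.content)
print(json_mode.choices[0].message.content)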