fix: make model_info field optional in ShowResponse

The `model_info` field in ShowResponse was defined with `Field(alias='model_info')`
without a default value, making it required even though the type is `Optional`.
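
In Pydantic v2, a field declared with `Field(...)` and no default is required even when
its annotation is `Optional`. A minimal, self-contained sketch of that failure mode
(illustrative only, not part of this change):

```python
from typing import Any, Mapping, Optional

from pydantic import BaseModel, Field, ValidationError


class Demo(BaseModel):
  # Same shape as the old ShowResponse field: Optional annotation, alias, no default.
  modelinfo: Optional[Mapping[str, Any]] = Field(alias='model_info')


try:
  # Input that omits 'model_info' fails validation, because the field is required.
  Demo.model_validate({})
except ValidationError as e:
  print(e)  # 1 validation error for Demo: model_info -> Field required
```

With `default=None` (as in the diff below), the same call succeeds and `modelinfo`
comes back as `None`.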

This caused a `ValidationError` when the `/api/show` endpoint omits the `model_info`
field, which happens with certain cloud models such as the following (see the
reproduction sketch after the list):
- glm-4.7:cloud
- qwen3-next:80b-cloud
- deepseek-v3.2:cloud
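
A hedged reproduction sketch of how this surfaces through the client (assumes a running
Ollama server with one of the affected cloud models available; the model name is just
the first example from the list above):

```python
import ollama

# /api/show for this model returns a body without a 'model_info' key, so building
# the ShowResponse raised a ValidationError before this fix; afterwards it succeeds
# and modelinfo is simply None.
response = ollama.show('glm-4.7:cloud')
print(response.modelinfo)  # None
```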

The fix adds `default=None` to the Field definition, making the field truly optional
(can be absent from input data) and defaulting to `None`.

Fixes #607

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Author: yurekami
Date: 2025-12-29 05:29:10 +09:00
Parent: d1d704050b
Commit: a343614ff2

2 changed files with 58 additions and 2 deletions

@@ -551,7 +551,7 @@ class ShowResponse(SubscriptableBaseModel):
   details: Optional[ModelDetails] = None
-  modelinfo: Optional[Mapping[str, Any]] = Field(alias='model_info')
+  modelinfo: Optional[Mapping[str, Any]] = Field(default=None, alias='model_info')
   parameters: Optional[str] = None

@@ -4,7 +4,7 @@ from pathlib import Path
 import pytest
-from ollama._types import CreateRequest, Image
+from ollama._types import CreateRequest, Image, ShowResponse


 def test_image_serialization_bytes():
@@ -92,3 +92,59 @@ def test_create_request_serialization_license_list():
   request = CreateRequest(model='test-model', license=['MIT', 'Apache-2.0'])
   serialized = request.model_dump()
   assert serialized['license'] == ['MIT', 'Apache-2.0']
+
+
+def test_show_response_without_model_info():
+  """
+  Test that ShowResponse can be created without the model_info field.
+
+  This is a regression test for issue #607 where certain cloud models
+  (e.g., glm-4.7:cloud, qwen3-next:80b-cloud, deepseek-v3.2:cloud) return
+  responses without the model_info field, causing a ValidationError.
+  """
+  # Response data without the model_info field (as returned by some cloud models)
+  response_data = {
+    'modelfile': '# Modelfile generated by "ollama show"',
+    'template': '{{ .Prompt }}',
+    'details': {
+      'parent_model': '',
+      'format': 'gguf',
+      'family': 'glm',
+      'families': ['glm'],
+      'parameter_size': '9.4B',
+      'quantization_level': 'Q4_K_M',
+    },
+    'capabilities': ['completion'],
+    'modified_at': '2025-01-01T00:00:00Z',
+  }
+
+  # This should not raise a ValidationError
+  response = ShowResponse.model_validate(response_data)
+
+  assert response.modelfile == '# Modelfile generated by "ollama show"'
+  assert response.template == '{{ .Prompt }}'
+  assert response.modelinfo is None  # model_info was not provided
+  assert response.capabilities == ['completion']
+
+
+def test_show_response_with_model_info():
+  """
+  Test that ShowResponse still works correctly when model_info is provided.
+  """
+  response_data = {
+    'modelfile': '# Modelfile',
+    'template': '{{ .Prompt }}',
+    'model_info': {
+      'general.architecture': 'llama',
+      'general.parameter_count': 7000000000,
+    },
+    'capabilities': ['completion'],
+    'modified_at': '2025-01-01T00:00:00Z',
+  }
+
+  response = ShowResponse.model_validate(response_data)
+
+  assert response.modelfile == '# Modelfile'
+  assert response.modelinfo is not None
+  assert response.modelinfo['general.architecture'] == 'llama'
+  assert response.modelinfo['general.parameter_count'] == 7000000000