SPARC/tests/test_llm.py
0xWheatyz af4114969a feat: migrate from Anthropic API to OpenRouter
Replace the direct Anthropic API integration with OpenRouter to enable
more flexible LLM provider access while continuing to use Claude 3.5 Sonnet.

Changes:
- Replace anthropic package with openai in requirements.txt
- Update config to use OPENROUTER_API_KEY instead of ANTHROPIC_API_KEY
- Migrate LLMAnalyzer from Anthropic client to OpenAI client with
  OpenRouter base URL (https://openrouter.ai/api/v1)
- Update model identifier to OpenRouter format: anthropic/claude-3.5-sonnet
- Convert API calls from messages.create() to chat.completions.create()
- Update response parsing to match OpenAI format
- Rename API key parameter in CompanyAnalyzer from anthropic_api_key
  to openrouter_api_key
- Update all tests to mock OpenAI client instead of Anthropic
- Fix client initialization to accept direct API key parameter

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2026-02-22 12:26:56 -05:00
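
The migrated module itself is not shown on this page, but the commit notes above and the test expectations below pin down its shape. A minimal sketch of what SPARC/llm.py plausibly looks like after this commit (the config import path, the prompt wording, and the _complete helper are assumptions inferred from the tests, not the actual source):

from openai import OpenAI

from SPARC.config import config  # assumed import path for the config object


class LLMAnalyzer:
    """Analyze patent text via OpenRouter using an OpenAI-compatible client."""

    def __init__(self, api_key=None):
        # OpenRouter speaks the OpenAI wire protocol, so the stock OpenAI
        # client works once it is pointed at the OpenRouter base URL.
        self.client = OpenAI(
            api_key=api_key or config.openrouter_api_key,
            base_url="https://openrouter.ai/api/v1",
        )
        # OpenRouter model identifiers are vendor-prefixed.
        self.model = "anthropic/claude-3.5-sonnet"

    def _complete(self, prompt, max_tokens):
        # chat.completions.create() replaces Anthropic's messages.create().
        response = self.client.chat.completions.create(
            model=self.model,
            max_tokens=max_tokens,
            messages=[{"role": "user", "content": prompt}],
        )
        # OpenAI-format response parsing (vs. Anthropic's response.content[0].text).
        return response.choices[0].message.content

    def analyze_patent_content(self, patent_content, company_name):
        # Exact prompt wording is an assumption; the tests only require that
        # it contain the company name and the patent content.
        prompt = f"Analyze this patent from {company_name}:\n\n{patent_content}"
        return self._complete(prompt, max_tokens=1024)

    def analyze_patent_portfolio(self, patents_data, company_name):
        listing = "\n\n".join(
            f"{p['patent_id']}: {p['content']}" for p in patents_data
        )
        prompt = f"Analyze the patent portfolio of {company_name}:\n\n{listing}"
        return self._complete(prompt, max_tokens=2048)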

131 lines
5.1 KiB
Python

"""Tests for LLM analysis functionality."""
import pytest
from unittest.mock import Mock, MagicMock
from SPARC.llm import LLMAnalyzer
class TestLLMAnalyzer:
"""Test LLM analyzer initialization and API interaction."""
def test_analyzer_initialization_with_api_key(self, mocker):
"""Test that analyzer initializes with provided API key."""
mock_openai = mocker.patch("SPARC.llm.OpenAI")
analyzer = LLMAnalyzer(api_key="test-key-123")
mock_openai.assert_called_once_with(
api_key="test-key-123",
base_url="https://openrouter.ai/api/v1"
)
assert analyzer.model == "anthropic/claude-3.5-sonnet"
def test_analyzer_initialization_from_config(self, mocker):
"""Test that analyzer loads API key from config when not provided."""
mock_openai = mocker.patch("SPARC.llm.OpenAI")
mock_config = mocker.patch("SPARC.llm.config")
mock_config.openrouter_api_key = "config-key-456"
analyzer = LLMAnalyzer()
mock_openai.assert_called_once_with(
api_key="config-key-456",
base_url="https://openrouter.ai/api/v1"
)
def test_analyze_patent_content(self, mocker):
"""Test single patent content analysis."""
mock_openai = mocker.patch("SPARC.llm.OpenAI")
mock_client = Mock()
mock_openai.return_value = mock_client
# Mock the API response
mock_response = Mock()
mock_response.choices = [Mock(message=Mock(content="Innovative GPU architecture."))]
mock_client.chat.completions.create.return_value = mock_response
analyzer = LLMAnalyzer(api_key="test-key")
result = analyzer.analyze_patent_content(
patent_content="ABSTRACT: GPU with new cache design...",
company_name="NVIDIA",
)
assert result == "Innovative GPU architecture."
mock_client.chat.completions.create.assert_called_once()
# Verify the prompt includes company name and content
call_args = mock_client.chat.completions.create.call_args
prompt_text = call_args[1]["messages"][0]["content"]
assert "NVIDIA" in prompt_text
assert "GPU with new cache design" in prompt_text
def test_analyze_patent_portfolio(self, mocker):
"""Test portfolio analysis with multiple patents."""
mock_openai = mocker.patch("SPARC.llm.OpenAI")
mock_client = Mock()
mock_openai.return_value = mock_client
# Mock the API response
mock_response = Mock()
mock_response.choices = [
Mock(message=Mock(content="Strong portfolio in AI and graphics."))
]
mock_client.chat.completions.create.return_value = mock_response
analyzer = LLMAnalyzer(api_key="test-key")
patents_data = [
{"patent_id": "US123", "content": "AI acceleration patent"},
{"patent_id": "US456", "content": "Graphics rendering patent"},
]
result = analyzer.analyze_patent_portfolio(
patents_data=patents_data, company_name="NVIDIA"
)
assert result == "Strong portfolio in AI and graphics."
mock_client.chat.completions.create.assert_called_once()
# Verify the prompt includes all patents
call_args = mock_client.chat.completions.create.call_args
prompt_text = call_args[1]["messages"][0]["content"]
assert "US123" in prompt_text
assert "US456" in prompt_text
assert "AI acceleration patent" in prompt_text
assert "Graphics rendering patent" in prompt_text
def test_analyze_patent_portfolio_with_correct_token_limit(self, mocker):
"""Test that portfolio analysis uses higher token limit."""
mock_openai = mocker.patch("SPARC.llm.OpenAI")
mock_client = Mock()
mock_openai.return_value = mock_client
mock_response = Mock()
mock_response.choices = [Mock(message=Mock(content="Analysis result."))]
mock_client.chat.completions.create.return_value = mock_response
analyzer = LLMAnalyzer(api_key="test-key")
patents_data = [{"patent_id": "US123", "content": "Test content"}]
analyzer.analyze_patent_portfolio(patents_data, "TestCo")
call_args = mock_client.chat.completions.create.call_args
# Portfolio analysis should use 2048 tokens
assert call_args[1]["max_tokens"] == 2048
def test_analyze_single_patent_with_correct_token_limit(self, mocker):
"""Test that single patent analysis uses lower token limit."""
mock_openai = mocker.patch("SPARC.llm.OpenAI")
mock_client = Mock()
mock_openai.return_value = mock_client
mock_response = Mock()
mock_response.choices = [Mock(message=Mock(content="Analysis result."))]
mock_client.chat.completions.create.return_value = mock_response
analyzer = LLMAnalyzer(api_key="test-key")
analyzer.analyze_patent_content("Test content", "TestCo")
call_args = mock_client.chat.completions.create.call_args
# Single patent should use 1024 tokens
assert call_args[1]["max_tokens"] == 1024
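
Note that these tests depend on the mocker fixture, which comes from the pytest-mock plugin rather than pytest itself, so pytest-mock must be installed alongside pytest for the suite to run (for example: pytest SPARC/tests/test_llm.py -v).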