feat: migrate from Anthropic API to OpenRouter

Replace direct Anthropic API integration with OpenRouter to enable
more flexible LLM provider access while continuing to use Claude 3.5 Sonnet.

Changes:
- Replace anthropic package with openai in requirements.txt
- Update config to use OPENROUTER_API_KEY instead of ANTHROPIC_API_KEY
- Migrate LLMAnalyzer from Anthropic client to OpenAI client with
  OpenRouter base URL (https://openrouter.ai/api/v1)
- Update model identifier to OpenRouter format: anthropic/claude-3.5-sonnet
- Convert API calls from messages.create() to chat.completions.create()
- Update response parsing to match OpenAI format
- Rename API key parameter in CompanyAnalyzer from anthropic_api_key
  to openrouter_api_key
- Update all tests to mock OpenAI client instead of Anthropic
- Fix client initialization to accept direct API key parameter

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
0xWheatyz 2026-02-22 12:26:56 -05:00
parent 8971ebc913
commit af4114969a
6 changed files with 55 additions and 46 deletions

View File

@@ -13,13 +13,13 @@ from typing import List
class CompanyAnalyzer:
"""Orchestrates end-to-end company performance analysis via patents."""
def __init__(self, anthropic_api_key: str | None = None):
def __init__(self, openrouter_api_key: str | None = None):
"""Initialize the company analyzer.
Args:
anthropic_api_key: Optional Anthropic API key. If None, loads from config.
openrouter_api_key: Optional OpenRouter API key. If None, loads from config.
"""
self.llm_analyzer = LLMAnalyzer(api_key=anthropic_api_key)
self.llm_analyzer = LLMAnalyzer(api_key=openrouter_api_key)
def analyze_company(self, company_name: str) -> str:
"""Analyze a company's performance based on their patent portfolio.

View File

@@ -10,5 +10,5 @@ load_dotenv()
# SerpAPI key for patent search
api_key = os.getenv("API_KEY")
# Anthropic API key for LLM analysis
anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
# OpenRouter API key for LLM analysis
openrouter_api_key = os.getenv("OPENROUTER_API_KEY")

View File

@@ -1,6 +1,6 @@
"""LLM integration for patent analysis using Anthropic's Claude."""
"""LLM integration for patent analysis using OpenRouter."""
from anthropic import Anthropic
from openai import OpenAI
from SPARC import config
from typing import Dict
@@ -12,14 +12,17 @@ class LLMAnalyzer:
"""Initialize the LLM analyzer.
Args:
api_key: Anthropic API key. If None, will attempt to load from config.
api_key: OpenRouter API key. If None, will attempt to load from config.
test_mode: If True, print prompts instead of making API calls
"""
self.test_mode = test_mode
if config.anthropic_api_key and not test_mode:
self.client = Anthropic(api_key=api_key or config.anthropic_api_key)
self.model = "claude-3-5-sonnet-20241022"
if (api_key or config.openrouter_api_key) and not test_mode:
self.client = OpenAI(
api_key=api_key or config.openrouter_api_key,
base_url="https://openrouter.ai/api/v1"
)
self.model = "anthropic/claude-3.5-sonnet"
else:
self.client = None
@@ -55,12 +58,12 @@ Provide a concise analysis (2-3 paragraphs) focusing on what this patent reveals
return "[TEST MODE - No API call made]"
if self.client:
message = self.client.messages.create(
response = self.client.chat.completions.create(
model=self.model,
max_tokens=1024,
messages=[{"role": "user", "content": prompt}],
)
return message.content[0].text
return response.choices[0].message.content
def analyze_patent_portfolio(
self, patents_data: list[Dict[str, str]], company_name: str
@@ -103,13 +106,13 @@ Provide a comprehensive analysis (4-5 paragraphs) with a final verdict on the co
return "[TEST MODE]"
try:
message = self.client.messages.create(
response = self.client.chat.completions.create(
model=self.model,
max_tokens=2048,
messages=[{"role": "user", "content": prompt}],
)
return message.content[0].text
return response.choices[0].message.content
except AttributeError:
return prompt

View File

@@ -4,4 +4,4 @@ pdfplumber
requests
pytest
pytest-mock
anthropic
openai

View File

@@ -13,7 +13,7 @@ class TestCompanyAnalyzer:
"""Test analyzer initialization with API key."""
mock_llm = mocker.patch("SPARC.analyzer.LLMAnalyzer")
analyzer = CompanyAnalyzer(anthropic_api_key="test-key")
analyzer = CompanyAnalyzer(openrouter_api_key="test-key")
mock_llm.assert_called_once_with(api_key="test-key")

View File

@@ -10,33 +10,39 @@ class TestLLMAnalyzer:
def test_analyzer_initialization_with_api_key(self, mocker):
"""Test that analyzer initializes with provided API key."""
mock_anthropic = mocker.patch("SPARC.llm.Anthropic")
mock_openai = mocker.patch("SPARC.llm.OpenAI")
analyzer = LLMAnalyzer(api_key="test-key-123")
mock_anthropic.assert_called_once_with(api_key="test-key-123")
assert analyzer.model == "claude-3-5-sonnet-20241022"
mock_openai.assert_called_once_with(
api_key="test-key-123",
base_url="https://openrouter.ai/api/v1"
)
assert analyzer.model == "anthropic/claude-3.5-sonnet"
def test_analyzer_initialization_from_config(self, mocker):
"""Test that analyzer loads API key from config when not provided."""
mock_anthropic = mocker.patch("SPARC.llm.Anthropic")
mock_openai = mocker.patch("SPARC.llm.OpenAI")
mock_config = mocker.patch("SPARC.llm.config")
mock_config.anthropic_api_key = "config-key-456"
mock_config.openrouter_api_key = "config-key-456"
analyzer = LLMAnalyzer()
mock_anthropic.assert_called_once_with(api_key="config-key-456")
mock_openai.assert_called_once_with(
api_key="config-key-456",
base_url="https://openrouter.ai/api/v1"
)
def test_analyze_patent_content(self, mocker):
"""Test single patent content analysis."""
mock_anthropic = mocker.patch("SPARC.llm.Anthropic")
mock_openai = mocker.patch("SPARC.llm.OpenAI")
mock_client = Mock()
mock_anthropic.return_value = mock_client
mock_openai.return_value = mock_client
# Mock the API response
mock_response = Mock()
mock_response.content = [Mock(text="Innovative GPU architecture.")]
mock_client.messages.create.return_value = mock_response
mock_response.choices = [Mock(message=Mock(content="Innovative GPU architecture."))]
mock_client.chat.completions.create.return_value = mock_response
analyzer = LLMAnalyzer(api_key="test-key")
result = analyzer.analyze_patent_content(
@@ -45,26 +51,26 @@ class TestLLMAnalyzer:
)
assert result == "Innovative GPU architecture."
mock_client.messages.create.assert_called_once()
mock_client.chat.completions.create.assert_called_once()
# Verify the prompt includes company name and content
call_args = mock_client.messages.create.call_args
call_args = mock_client.chat.completions.create.call_args
prompt_text = call_args[1]["messages"][0]["content"]
assert "NVIDIA" in prompt_text
assert "GPU with new cache design" in prompt_text
def test_analyze_patent_portfolio(self, mocker):
"""Test portfolio analysis with multiple patents."""
mock_anthropic = mocker.patch("SPARC.llm.Anthropic")
mock_openai = mocker.patch("SPARC.llm.OpenAI")
mock_client = Mock()
mock_anthropic.return_value = mock_client
mock_openai.return_value = mock_client
# Mock the API response
mock_response = Mock()
mock_response.content = [
Mock(text="Strong portfolio in AI and graphics.")
mock_response.choices = [
Mock(message=Mock(content="Strong portfolio in AI and graphics."))
]
mock_client.messages.create.return_value = mock_response
mock_client.chat.completions.create.return_value = mock_response
analyzer = LLMAnalyzer(api_key="test-key")
patents_data = [
@@ -77,10 +83,10 @@ class TestLLMAnalyzer:
)
assert result == "Strong portfolio in AI and graphics."
mock_client.messages.create.assert_called_once()
mock_client.chat.completions.create.assert_called_once()
# Verify the prompt includes all patents
call_args = mock_client.messages.create.call_args
call_args = mock_client.chat.completions.create.call_args
prompt_text = call_args[1]["messages"][0]["content"]
assert "US123" in prompt_text
assert "US456" in prompt_text
@@ -89,36 +95,36 @@
def test_analyze_patent_portfolio_with_correct_token_limit(self, mocker):
"""Test that portfolio analysis uses higher token limit."""
mock_anthropic = mocker.patch("SPARC.llm.Anthropic")
mock_openai = mocker.patch("SPARC.llm.OpenAI")
mock_client = Mock()
mock_anthropic.return_value = mock_client
mock_openai.return_value = mock_client
mock_response = Mock()
mock_response.content = [Mock(text="Analysis result.")]
mock_client.messages.create.return_value = mock_response
mock_response.choices = [Mock(message=Mock(content="Analysis result."))]
mock_client.chat.completions.create.return_value = mock_response
analyzer = LLMAnalyzer(api_key="test-key")
patents_data = [{"patent_id": "US123", "content": "Test content"}]
analyzer.analyze_patent_portfolio(patents_data, "TestCo")
call_args = mock_client.messages.create.call_args
call_args = mock_client.chat.completions.create.call_args
# Portfolio analysis should use 2048 tokens
assert call_args[1]["max_tokens"] == 2048
def test_analyze_single_patent_with_correct_token_limit(self, mocker):
"""Test that single patent analysis uses lower token limit."""
mock_anthropic = mocker.patch("SPARC.llm.Anthropic")
mock_openai = mocker.patch("SPARC.llm.OpenAI")
mock_client = Mock()
mock_anthropic.return_value = mock_client
mock_openai.return_value = mock_client
mock_response = Mock()
mock_response.content = [Mock(text="Analysis result.")]
mock_client.messages.create.return_value = mock_response
mock_response.choices = [Mock(message=Mock(content="Analysis result."))]
mock_client.chat.completions.create.return_value = mock_response
analyzer = LLMAnalyzer(api_key="test-key")
analyzer.analyze_patent_content("Test content", "TestCo")
call_args = mock_client.messages.create.call_args
call_args = mock_client.chat.completions.create.call_args
# Single patent should use 1024 tokens
assert call_args[1]["max_tokens"] == 1024