SPARC/tmp/99-7861-prompt

174 lines
6.4 KiB
Plaintext

<DIRECTIONS>
How can I make this run just print the text instead of making a call to an LLM? I am looking to test before wasting any credits.
</DIRECTIONS>
<Context>
You receive a selection in neovim that you need to replace with new code.
The selection's contents may contain notes; if there are any, always incorporate them.
Consider the context of the selection and what you are supposed to be implementing.
<SELECTION_LOCATION>
range(point(11,4),point(56,1))
</SELECTION_LOCATION>
<SELECTION_CONTENT>
def __init__(self, api_key: str | None = None):
    """Initialize the LLM analyzer.

    Args:
        api_key: Anthropic API key. If None, will attempt to load from
            config. If neither is available, the analyzer runs in dry-run
            mode (client stays None and prompts are printed, not sent).
    """
    if api_key or config.anthropic_api_key:
        self.client = Anthropic(api_key=api_key or config.anthropic_api_key)
    else:
        # No key available: leave client as None so callers can branch on
        # it instead of crashing with AttributeError on a missing attribute.
        self.client = None
    self.model = "claude-3-5-sonnet-20241022"
def analyze_patent_content(self, patent_content: str, company_name: str) -> str:
    """Analyze patent content to estimate company innovation and performance.

    Args:
        patent_content: Minimized patent text (abstract, claims, summary)
        company_name: Name of the company for context

    Returns:
        Analysis text from the LLM, or — when no client is configured —
        the prompt itself (printed to stdout) so the flow can be tested
        without spending API credits.
    """
    prompt = f"""You are a patent analyst evaluating {company_name}'s innovation strategy.
Analyze the following patent content and provide insights on:
1. Innovation quality and novelty
2. Technical complexity and defensibility
3. Market potential and commercial viability
4. Strategic positioning relative to industry trends
Patent Content:
{patent_content}
Provide a concise analysis (2-3 paragraphs) focusing on what this patent reveals about the company's technical direction and competitive advantage."""
    if self.client:
        message = self.client.messages.create(
            model=self.model,
            max_tokens=1024,
            messages=[{"role": "user", "content": prompt}],
        )
        return message.content[0].text
    # Dry-run: print the prompt instead of calling the LLM so no credits
    # are used; return it as a str to honor the declared return type
    # (the original returned True here, violating the -> str contract).
    print(prompt)
    return prompt
</SELECTION_CONTENT>
<FILE_CONTAINING_SELECTION>
"""LLM integration for patent analysis using Anthropic's Claude."""
from anthropic import Anthropic
from SPARC import config
from typing import Dict
class LLMAnalyzer:
"""Handles LLM-based analysis of patent content."""
def __init__(self, api_key: str | None = None):
"""Initialize the LLM analyzer.
Args:
api_key: Anthropic API key. If None, will attempt to load from config.
"""
if config.anthropic_api_key:
self.client = Anthropic(api_key=api_key or config.anthropic_api_key)
self.model = "claude-3-5-sonnet-20241022"
def analyze_patent_content(self, patent_content: str, company_name: str) -> str:
"""Analyze patent content to estimate company innovation and performance.
Args:
patent_content: Minimized patent text (abstract, claims, summary)
company_name: Name of the company for context
Returns:
Analysis text describing innovation quality and potential impact
"""
prompt = f"""You are a patent analyst evaluating {company_name}'s innovation strategy.
Analyze the following patent content and provide insights on:
1. Innovation quality and novelty
2. Technical complexity and defensibility
3. Market potential and commercial viability
4. Strategic positioning relative to industry trends
Patent Content:
{patent_content}
Provide a concise analysis (2-3 paragraphs) focusing on what this patent reveals about the company's technical direction and competitive advantage."""
if self.client:
message = self.client.messages.create(
model=self.model,
max_tokens=1024,
messages=[{"role": "user", "content": prompt}],
)
else:
with open(f"AI_Prompts/{company_name}", "w") as f:
f.write(prompt)
return True
return message.content[0].text
def analyze_patent_portfolio(
self, patents_data: list[Dict[str, str]], company_name: str
) -> str:
"""Analyze multiple patents to estimate overall company performance.
Args:
patents_data: List of dicts, each containing 'patent_id' and 'content'
company_name: Name of the company being analyzed
Returns:
Comprehensive analysis of company's innovation trajectory and outlook
"""
# Combine all patent summaries
portfolio_summary = []
for idx, patent in enumerate(patents_data, 1):
portfolio_summary.append(
f"Patent {idx} ({patent['patent_id']}):\n{patent['content']}"
)
combined_content = "\n\n---\n\n".join(portfolio_summary)
prompt = f"""You are analyzing {company_name}'s patent portfolio to estimate their future performance and innovation trajectory.
You have {len(patents_data)} recent patents to analyze. Evaluate the portfolio holistically:
1. Innovation Trends: What technology areas are they focusing on?
2. Strategic Direction: What does this reveal about their business strategy?
3. Competitive Position: How defensible are these innovations?
4. Market Outlook: What market opportunities do these patents target?
5. Performance Forecast: Based on this innovation activity, what's your assessment of their likely performance?
Patent Portfolio:
{combined_content}
Provide a comprehensive analysis (4-5 paragraphs) with a final verdict on the company's innovation strength and performance outlook."""
message = self.client.messages.create(
model=self.model,
max_tokens=2048,
messages=[{"role": "user", "content": prompt}],
)
return message.content[0].text
</FILE_CONTAINING_SELECTION>
</Context>
<MustObey>
NEVER alter any file other than TEMP_FILE.
Never provide the requested changes as conversational output; return only the code.
ONLY provide requested changes by writing the change to TEMP_FILE
never attempt to read TEMP_FILE.
It is purely for output.
Previous contents, which may not exist, can be written over without worry
After writing TEMP_FILE once you should be done. Be done and end the session.
</MustObey>
<TEMP_FILE>/home/l-wyatt/Documents/side-work/SPARC/tmp/99-7861</TEMP_FILE>