test
.gitignore (vendored, new file)
@@ -0,0 +1 @@
venv/

main.py (new file)
@@ -0,0 +1,119 @@
from llama_cpp import Llama
from langchain_core.language_models import LLM
from langchain.prompts import PromptTemplate
from langchain.agents import AgentExecutor, create_react_agent
from langchain.tools import Tool
from typing import Optional, List
from googlesearch import search
from bs4 import BeautifulSoup
import requests

# --- LLaMA-CPP model setup ---
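# Note: the model is loaded once at import time. n_ctx=2**21 requests a
# ~2M-token context window, which most GGUF models do not support and which
# can allocate a very large KV cache; a smaller value may be needed in practice.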
llm_model = Llama(
    model_path="ReAction-1.5B.Q5_K_M.gguf",
    n_ctx=2**21,
    n_threads=8,
    use_mlock=True,
    verbose=False
)

# --- LangChain wrapper ---
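# Minimal custom-LLM wrapper: a synchronous LangChain LLM subclass only needs
# _llm_type and _call; streaming and async hooks are left out here.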
class LlamaCppLLM(LLM):
    @property
    def _llm_type(self) -> str:
        return "llama-cpp"

    def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        result = llm_model(
            prompt,
            stop=stop,
            max_tokens=1024,
            echo=False,
        )
        output = result["choices"][0]["text"].strip()
        return output

custom_llm = LlamaCppLLM()

# --- Tool definition ---
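# The plain Python helpers below are wrapped as LangChain Tool objects further
# down. weather_tool is defined but not passed to the agent, which only gets
# the search and HTML tools, so weather questions are answered via web search.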
def extract_text(html):
    soup = BeautifulSoup(html, "html.parser")
    # Remove scripts and styles
    for tag in soup(["script", "style"]):
        tag.decompose()
    text = soup.get_text(separator=" ", strip=True)
    return text

def weather(city: str) -> str:
    # Stub tool: always reports sunny weather.
    return f"The weather in {city} is Sunny."

def search_google(query: str):
    print(f"search tool called {query}")
    return [url for url in search(query, stop=5)]

def get_webhtml(url):
    print(url)
    # Drop stray single quotes the model may wrap around the URL.
    url = url.replace("'", "")
    if url:
        r = requests.get(url)
    else:
        return None
    return extract_text(r.text) if r.status_code == 200 else None

weather_tool = Tool(
    name="weather",
    func=weather,
    description="Use this tool to get the weather of a given city. Input should be the city name."
)

search_tool = Tool(
    name="search_tool",
    func=search_google,
    description="Returns the first 5 Google results for a given query."
)

html_tool = Tool(
    name="get_webhtml",
    func=get_webhtml,
    description="Given a URL, returns the page text with scripts and styles stripped. If it returns None, the website isn't available."
)

# --- Custom ReAct prompt ---
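# create_react_agent expects the prompt to expose {tools}, {tool_names}, and
# {agent_scratchpad}; {input} carries the user's question.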
prompt_template = PromptTemplate.from_template("""
You are a helpful assistant that can use tools to answer questions.

TOOLS:
{tools}

FORMAT:
Question: the input question
Thought: think about what to do
Action: pick one of [{tool_names}]
Action Input: "<the input to the action>"
Observation: result of the action
... (you can repeat Thought/Action/Observation until you reach a Final Answer) ...
... (Also, only use Action Input AFTER you use Action, so before you use Action Input again, you have to call a tool again. Reach a Final Answer within 3 iterations.) ...
Thought: I now know the final answer
Final Answer: <the final answer>

NEW QUESTION:

Question: {input}
{agent_scratchpad}
""")

# --- Create ReAct agent ---
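# The executor re-prompts the model on malformed output (handle_parsing_errors=True)
# and stops after max_iterations=5 tool-use rounds even without a Final Answer.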
react_agent = create_react_agent(
    llm=custom_llm,
    tools=[search_tool, html_tool],
    prompt=prompt_template
)

agent = AgentExecutor(
    agent=react_agent,
    tools=[search_tool, html_tool],
    verbose=True,
    handle_parsing_errors=True,
    max_iterations=5
)

# --- Run it ---
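# AgentExecutor.invoke returns a dict (typically with "input" and "output" keys),
# so the print below shows the whole dict rather than just the answer text.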
response = agent.invoke({"input": "What is the weather in San Francisco"})
print("\nFinal Output:\n", response)