async def start(self) -> None:
    """Main chat session handler."""
    try:
        for server in self.servers:
            try:
                await server.initialize()
            except Exception as e:
                logging.error(f"Failed to initialize server: {e}")
                await self.cleanup_servers()
                return

        all_tools = []
        for server in self.servers:
            tools = await server.list_tools()
            all_tools.extend(tools)

        tools_description = "\n".join(
            [tool.format_for_llm() for tool in all_tools]
        )

        system_message = (
            "You are a helpful assistant with access to these tools:\n\n"
            f"{tools_description}\n"
            "Choose the appropriate tool based on the user's question. "
            "If no tool is needed, reply directly.\n\n"
            "IMPORTANT: When you need to use a tool, you must ONLY respond with "
            "the exact JSON object format below, nothing else:\n"
            "{\n"
            '    "tool": "tool-name",\n'
            '    "arguments": {\n'
            '        "argument-name": "value"\n'
            "    }\n"
            "}\n\n"
            "After receiving a tool's response:\n"
            "1. Transform the raw data into a natural, conversational response\n"
            "2. Keep responses concise but informative\n"
            "3. Focus on the most relevant information\n"
            "4. Use appropriate context from the user's question\n"
            "5. Avoid simply repeating the raw data\n\n"
            "Please use only the tools that are explicitly defined above."
        )

        messages = [{"role": "system", "content": system_message}]

        while True:
            try:
                # Lowercase only the copy used for the quit check; sending a
                # lowercased message to the LLM would mangle the user's input.
                user_input = input("You: ").strip()
                if user_input.lower() in ["quit", "exit"]:
                    logging.info("\nExiting...")
                    break

                messages.append({"role": "user", "content": user_input})

                llm_response = self.llm_client.get_response(messages)
                logging.info("\nAssistant: %s", llm_response)

                result = await self.process_llm_response(llm_response)

                # If a tool actually ran, feed its result back to the LLM
                # so it can produce a natural-language final answer.
                if result != llm_response:
                    messages.append({"role": "assistant", "content": llm_response})
                    messages.append({"role": "system", "content": result})

                    final_response = self.llm_client.get_response(messages)
                    logging.info("\nFinal response: %s", final_response)
                    messages.append(
                        {"role": "assistant", "content": final_response}
                    )
                else:
                    messages.append({"role": "assistant", "content": llm_response})

            except KeyboardInterrupt:
                logging.info("\nExiting...")
                break

    finally:
        await self.cleanup_servers()

# ... other code omitted
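# start() relies on tool.format_for_llm() to build the tool list in the
# system prompt, but the Tool class is not shown in this section. Below is a
# minimal sketch of what such a formatter could look like -- the attribute
# names (name, description, input_schema) are assumptions for illustration,
# not a fixed API:

class Tool:
    """Illustrative wrapper for a tool returned by server.list_tools()."""

    def __init__(self, name: str, description: str, input_schema: dict) -> None:
        self.name = name
        self.description = description
        self.input_schema = input_schema

    def format_for_llm(self) -> str:
        """Render the tool as plain text for the system prompt."""
        args = []
        for param, info in self.input_schema.get("properties", {}).items():
            required = param in self.input_schema.get("required", [])
            args.append(
                f"- {param}: {info.get('description', '')}"
                f"{' (required)' if required else ''}"
            )
        return (
            f"Tool: {self.name}\n"
            f"Description: {self.description}\n"
            "Arguments:\n" + "\n".join(args)
        )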
def get_response(self, messages: list[dict[str, str]]) -> str:
    """Get a response from the LLM.

    Args:
        messages: A list of message dictionaries.

    Returns:
        The LLM's response as a string, or a user-facing error message
        if the request fails.
    """
    url = "https://api.groq.com/openai/v1/chat/completions"

    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self.api_key}",
    }
    payload = {
        "messages": messages,
        "model": "llama-3.2-90b-vision-preview",
        "temperature": 0.7,
        "max_tokens": 4096,
        "top_p": 1,
        "stream": False,
        "stop": None,
    }

    try:
        with httpx.Client() as client:
            response = client.post(url, headers=headers, json=payload)
            response.raise_for_status()
            data = response.json()
            return data["choices"][0]["message"]["content"]

    # httpx.HTTPError covers both transport failures (RequestError) and bad
    # status codes (HTTPStatusError). Catching only RequestError would let
    # the HTTPStatusError from raise_for_status() escape unhandled, and the
    # isinstance check below would be dead code.
    except httpx.HTTPError as e:
        error_message = f"Error getting LLM response: {str(e)}"
        logging.error(error_message)

        if isinstance(e, httpx.HTTPStatusError):
            status_code = e.response.status_code
            logging.error(f"Status code: {status_code}")
            logging.error(f"Response details: {e.response.text}")

        return (
            f"I encountered an error: {error_message}. "
            "Please try again or rephrase your request."
        )

# ... some code omitted
async def process_llm_response(self, llm_response: str) -> str:
    """Process the LLM response and execute tools if needed.

    Args:
        llm_response: The response from the LLM.

    Returns:
        The result of tool execution or the original response.
    """
    import json

    try:
        tool_call = json.loads(llm_response)
        # Guard against JSON that parses but isn't an object (e.g. a bare
        # number or list), which would make the "in" checks raise TypeError.
        if (
            isinstance(tool_call, dict)
            and "tool" in tool_call
            and "arguments" in tool_call
        ):
            logging.info(f"Executing tool: {tool_call['tool']}")
            logging.info(f"With arguments: {tool_call['arguments']}")

            for server in self.servers:
                tools = await server.list_tools()
                if any(tool.name == tool_call["tool"] for tool in tools):
                    try:
                        result = await server.execute_tool(
                            tool_call["tool"], tool_call["arguments"]
                        )

                        if isinstance(result, dict) and "progress" in result:
                            progress = result["progress"]
                            total = result["total"]
                            percentage = (progress / total) * 100
                            logging.info(
                                f"Progress: {progress}/{total} "
                                f"({percentage:.1f}%)"
                            )

                        return f"Tool execution result: {result}"
                    except Exception as e:
                        error_msg = f"Error executing tool: {str(e)}"
                        logging.error(error_msg)
                        return error_msg

            return f"No server found with tool: {tool_call['tool']}"

        return llm_response
    except json.JSONDecodeError:
        # Not a tool call -- treat the response as plain conversation.
        return llm_response