# System prompt for the "automatic AI config" flow: given a free-form user task,
# the LLM must invent a role-based agent name (<role>GPT), a one-line
# description, and up to 5 goals, replying ONLY in the exact example format
# shown below — no explanation or conversation.
DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC = """ Your task is to devise up to 5 highly effective goals and an appropriate role-based name (_GPT) for an autonomous agent, ensuring that the goals are optimally aligned with the successful completion of its assigned task.
The user will provide the task, you will provide only the output in the exact format specified below with no explanation or conversation.
Example input: Help me with marketing my business
Example output: Name: CMOGPT Description: a professional digital marketer AI that assists Solopreneurs in growing their businesses by providing world-class expertise in solving marketing problems for SaaS, content products, agencies, and more. Goals: - Engage in effective problem-solving, prioritization, planning, and supporting execution to address your marketing needs as your virtual Chief Marketing Officer.
- Provide specific, actionable, and concise advice to help you make informed decisions without the use of platitudes or overly wordy explanations.
- Identify and prioritize quick wins and cost-effective campaigns that maximize results with minimal time and budget investment.
- Proactively take the lead in guiding you and offering suggestions when faced with unclear information or uncertainty to ensure your marketing strategy remains on track. """
# Companion user-message template for DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC;
# `{{user_prompt}}` is a placeholder substituted with the raw task text before
# the message is sent to the LLM.
DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC = (
    "Task: '{{user_prompt}}'\n"
    "Respond only with the output in the exact format specified in the system prompt, with no explanation or conversation.\n"
)
def run_interaction_loop(
    agent: Agent,
) -> None:
    """Run the main interaction loop for the agent.

    Each cycle: plan (agent.think) -> show the user -> execute the chosen
    command, repeating while the cycle budget lasts.

    NOTE(review): excerpt — `cycles_remaining`, `cycle_budget`, `spinner`,
    `config`, `ai_config` and `user_input` are initialized in code omitted
    from this chunk; confirm against the full file.

    Args:
        agent: The agent to run the interaction loop for.

    Returns:
        None
    """
    #########################
    # Application Main Loop #
    #########################
    while cycles_remaining > 0:
        logger.debug(f"Cycle budget: {cycle_budget}; remaining: {cycles_remaining}")

        ########
        # Plan #
        ########
        # Have the agent determine the next action to take.
        with spinner:
            command_name, command_args, assistant_reply_dict = agent.think()

        ###############
        # Update User #
        ###############
        # Print the assistant's thoughts and the next command to the user.
        update_user(config, ai_config, command_name, command_args, assistant_reply_dict)

        ###################
        # Execute Command #
        ###################
        # Decrement the cycle counter first to reduce the likelihood of a SIGINT
        # happening during command execution, setting the cycles remaining to 1,
        # and then having the decrement set it to 0, exiting the application.
        # "human_feedback" does not consume budget.
        if command_name != "human_feedback":
            cycles_remaining -= 1
        result = agent.execute(command_name, command_args, user_input)

        # Surface the command result (or failure) to the user.
        if result is not None:
            logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
        else:
            logger.typewriter_log("SYSTEM: ", Fore.YELLOW, "Unable to execute command")
# One-line instruction appended before every LLM "think" cycle, nudging the
# model to choose exactly one command and respond in the required JSON schema.
DEFAULT_TRIGGERING_PROMPT = (
    "Determine exactly one command to use based on the given goals "
    "and the progress you have made so far, "
    "and respond using the JSON schema specified previously:"
)
该指令比较简单,每次 LLM 推理前添加即可:
def construct_prompt( self, cycle_instruction: str, thought_process_id: ThoughtProcessID, ) -> ChatSequence: """Constructs and returns a prompt with the following structure: 1. System prompt 2. Message history of the agent, truncated & prepended with running summary as needed 3. `cycle_instruction`
Params: cycle_instruction: The final instruction for a thinking cycle """
if not cycle_instruction: raise ValueError("No instruction given")
def construct_full_prompt(
    self, config: "Config", prompt_generator: "Optional[PromptGenerator]" = None
) -> str:
    """
    Returns a prompt to the user with the class information in an organized fashion.

    Parameters:
        config: Application config; read for `execute_local_commands`.
        prompt_generator: Optional generator; falls back to the cached
            `self.prompt_generator` or a freshly built default one.

    Returns:
        full_prompt (str): A string containing the initial prompt for the user
        including the ai_name, ai_role, ai_goals, and api_budget.
    """
    prompt_generator = prompt_generator or self.prompt_generator
    if prompt_generator is None:
        # Lazy import: only needed when no generator was supplied or cached.
        from autogpt.prompts.prompt import build_default_prompt_generator

        prompt_generator = build_default_prompt_generator(config)
    prompt_generator.command_registry = self.command_registry
    self.prompt_generator = prompt_generator  # cache for subsequent calls

    # Construct full prompt: identity first, then independence instruction.
    full_prompt_parts = [
        f"You are {self.ai_name}, {self.ai_role.rstrip('.')}.",
        "Your decisions must always be made independently without seeking "
        "user assistance. Play to your strengths as an LLM and pursue "
        "simple strategies with no legal complications.",
    ]

    if config.execute_local_commands:
        # add OS info to prompt (distro gives a prettier name on Linux)
        os_name = platform.system()
        os_info = (
            platform.platform(terse=True)
            if os_name != "Linux"
            else distro.name(pretty=True)
        )

        full_prompt_parts.append(f"The OS you are running on is: {os_info}")

    additional_constraints: list[str] = []
    if self.api_budget > 0.0:
        additional_constraints.append(
            f"It takes money to let you run. "
            f"Your API budget is ${self.api_budget:.3f}"
        )

    # FIX(review): these two lists were referenced below but never defined in
    # the original excerpt (NameError at runtime); presumably they are built
    # like `additional_constraints` in omitted code — TODO confirm against the
    # full file. Defaulting to empty preserves the base lists' output.
    additional_resources: list[str] = []
    additional_best_practices: list[str] = []

    full_prompt_parts.append(
        "## Constraints\n"
        "You operate within the following constraints:\n"
        f"{self._generate_numbered_list(self.constraints + additional_constraints)}\n\n"
        "## Commands\n"
        "You have access to the following commands:\n"
        f"{self._generate_commands()}\n\n"
        "## Resources\n"
        "You can leverage access to the following resources:\n"
        f"{self._generate_numbered_list(self.resources + additional_resources)}\n\n"
        "## Best practices\n"
        # FIX(review): the original excerpt left this f-string unterminated
        # (missing closing quote), making the code a SyntaxError.
        f"{self._generate_numbered_list(self.best_practices + additional_best_practices)}"
    )

    if self.ai_goals:
        full_prompt_parts.append(
            "\n".join(
                [
                    "## Goals",
                    "For your task, you must fulfill the following goals:",
                    *[f"{i+1}. {goal}" for i, goal in enumerate(self.ai_goals)],
                ]
            )
        )

    # Sections are separated by blank lines; trim stray leading/trailing ones.
    return "\n\n".join(full_prompt_parts).strip("\n")
def _generate_commands(self) -> str:
    """Collect display strings for every command available to the agent.

    NOTE(review): excerpt is truncated — no return statement is visible, so
    as shown the method would return None despite the `-> str` annotation;
    the final formatting/return presumably lives in omitted code.
    """
    command_strings = []
    if self.command_registry:
        # Only include registry commands that are currently enabled.
        command_strings += [
            str(cmd)
            for cmd in self.command_registry.commands.values()
            if cmd.enabled
        ]

    # Add commands from plugins etc.
    command_strings += [str(cmd) for cmd in self.commands]
constraints: [ '~4000 word limit for short term memory. Your short term memory is short, so immediately save important information to files.', 'If you are unsure how you previously did something or want to recall past events, thinking about similar events will help you remember.', 'No user assistance', 'Exclusively use the commands listed below e.g. command_name' ] resources: [ 'Internet access for searches and information gathering.', 'Long Term memory management.', 'File output.', 'Command execution' ] best_practices: [ 'Continuously review and analyze your actions to ensure you are performing to the best of your abilities.', 'Constructively self-criticize your big-picture behavior constantly.', 'Reflect on past decisions and strategies to refine your approach.', 'Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.' ]
这里对 Commands 做单独说明:Commands 指的是我们提供给 LLM 可以调用的工具。每个 Command 都会声明 name、description、parameters 三项信息,我们会将这些信息组合到 Prompt 中。
@command(
    "web_search",
    "Searches the web",
    {
        "query": {
            "type": "string",
            "description": "The search query",
            "required": True,
        }
    },
    aliases=["search"],
)
def web_search(query: str, agent: Agent, num_results: int = 8) -> str:
    """Return the results of a Google search

    Args:
        query (str): The search query.
        num_results (int): The number of results to return.

    Returns:
        str: The results of the search.
    """
    search_results = []
    attempts = 0

    # Retry loop, bounded by DUCKDUCKGO_MAX_ATTEMPTS (defined elsewhere).
    # NOTE(review): excerpt truncated — only the empty-query short-circuit is
    # visible; the actual search call is in code omitted from this chunk.
    while attempts < DUCKDUCKGO_MAX_ATTEMPTS:
        if not query:
            # Nothing to search for: return an empty JSON list immediately.
            return json.dumps(search_results)
think 过程中指定的响应格式如下所示。可以看到,响应格式中要求 LLM 给出 plan 与 criticism,这也是一种促进 LLM 思考得更加周全的方式。
def response_format_instruction(self, thought_process_id: "ThoughtProcessID") -> str:
    """Build the instruction telling the LLM what JSON shape to respond with.

    Args:
        thought_process_id: Only the "one-shot" thought process is supported.

    Returns:
        str: Instruction text embedding a TypeScript `Response` interface.

    Raises:
        NotImplementedError: For any thought process other than "one-shot".
    """
    if thought_process_id != "one-shot":
        raise NotImplementedError(f"Unknown thought process '{thought_process_id}'")

    # FIX(review): the original literal read `args: Record;` — the type
    # parameters were evidently stripped as an HTML tag; restored per the
    # upstream schema. TODO confirm against the full file.
    RESPONSE_FORMAT_WITH_COMMAND = """```ts
interface Response {
thoughts: {
// Thoughts
text: string;
reasoning: string;
// Short markdown-style bullet list that conveys the long-term plan
plan: string;
// Constructive self-criticism
criticism: string;
// Summary of thoughts to say to the user
speak: string;
};
command: {
name: string;
args: Record<string, any>;
};
}
```"""

    RESPONSE_FORMAT_WITHOUT_COMMAND = """```ts
interface Response {
thoughts: {
// Thoughts
text: string;
reasoning: string;
// Short markdown-style bullet list that conveys the long-term plan
plan: string;
// Constructive self-criticism
criticism: string;
// Summary of thoughts to say to the user
speak: string;
};
}
```"""

    # With OpenAI function calling enabled (and commands registered), the
    # chosen command travels in the function_call field, so the JSON schema
    # omits the `command` key.
    use_functions = self.config.openai_functions and self.command_registry.commands

    # FIX(review): `response_format` was referenced below but never assigned
    # in the original excerpt (NameError) — select the schema here.
    response_format = (
        RESPONSE_FORMAT_WITHOUT_COMMAND if use_functions else RESPONSE_FORMAT_WITH_COMMAND
    )

    return (
        f"Respond strictly with JSON{', and also specify a command to use through a function_call' if use_functions else ''}. "
        "The JSON should be compatible with the TypeScript type `Response` from the following:\n"
        f"{response_format}\n"
    )
这里额外提一下:由于 OpenAI 提供了 Function Calling 的 API,所以在返回格式上有两种情况。如果使用 Function Calling,则不在 JSON 中返回 command,而是由 Function Calling 的固定字段返回;如果不使用 Function Calling,则在 JSON 中显式指定 command 的返回格式。关于 Function Calling,之前写过一篇文章进行介绍:《Function Calling——从 prompt 到 fine-tune》。