response = get_completion("What is the capital of France?")
print(response)
Note: the LLM does not see individual characters; internally it works on tokens.
# wrong prompt
response = get_completion("Take the letters in lollipop \
and reverse them")
print(response)
# output: "lollipop" in reverse should be "popillol"

# correct prompt: separate the letters with "-" so each letter becomes its own token
response = get_completion("""Take the letters in \
l-o-l-l-i-p-o-p and reverse them""")
print(response)
# output: 'p-o-p-i-l-l-o-l'
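To see why the hyphenated prompt works, the tokenization can be inspected directly; a minimal sketch using the tiktoken library (not part of the original notes; the exact split may vary by encoding):

import tiktoken

# encoding used by gpt-3.5-turbo
enc = tiktoken.encoding_for_model("gpt-3.5-turbo")

# "lollipop" is split into a few multi-character tokens, not single letters,
# which is why the model struggles to reverse it letter by letter
print([enc.decode([t]) for t in enc.encode("lollipop")])

# "l-o-l-l-i-p-o-p" splits into roughly one token per letter
print([enc.decode([t]) for t in enc.encode("l-o-l-l-i-p-o-p")])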
helper function (chat)
def get_completion_from_messages(messages,
                                 model="gpt-3.5-turbo",
                                 temperature=0,
                                 max_tokens=500):
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=temperature,  # the degree of randomness of the model's output
        max_tokens=max_tokens,    # the maximum number of tokens the model can output
    )
    return response.choices[0].message["content"]
Using role definitions
messages = [
    {'role': 'system',
     'content': """You are an assistant who \
responds in the style of Dr Seuss."""},
    {'role': 'user',
     'content': """write me a very short poem \
about a happy carrot"""},
]
response = get_completion_from_messages(messages, temperature=1)
print(response)
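The 'assistant' role can also be used to feed the model's earlier replies back in, so it keeps context across turns; a sketch reusing the helper above (the wording of the messages is illustrative):

messages = [
    {'role': 'system', 'content': 'You are a friendly chatbot.'},
    {'role': 'user', 'content': 'Hi, my name is Isa.'},
    {'role': 'assistant', 'content': "Hi Isa! It's nice to meet you."},  # previous model reply
    {'role': 'user', 'content': 'Can you remind me what my name is?'},
]
response = get_completion_from_messages(messages, temperature=1)
print(response)
# the model can answer with the name only because the earlier turns are included in messages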
2. Instruction-tuned LLM: fine-tuned on instructions and further refined with RLHF (reinforcement learning from human feedback).
Guidelines
Setup
import openai
import os
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())
openai.api_key = os.getenv('OPENAI_API_KEY')
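load_dotenv() reads the key from a local .env file in the project directory; for illustration, that file would contain a single line (the value below is a placeholder, not a real key):

OPENAI_API_KEY=sk-your-key-here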
helper function
def get_completion(prompt, model="gpt-3.5-turbo"):
    messages = [{"role": "user", "content": prompt}]
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0,  # the degree of randomness of the model's output
    )
    return response.choices[0].message["content"]
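Since billing and the max_tokens limit are both counted in tokens, a variant of the helper that also reports token usage can be handy; a sketch based on the usage fields returned by openai.ChatCompletion.create:

def get_completion_and_token_count(prompt, model="gpt-3.5-turbo"):
    messages = [{"role": "user", "content": prompt}]
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=0,
    )
    content = response.choices[0].message["content"]
    token_usage = {
        'prompt_tokens': response['usage']['prompt_tokens'],          # tokens in the prompt
        'completion_tokens': response['usage']['completion_tokens'],  # tokens in the reply
        'total_tokens': response['usage']['total_tokens'],
    }
    return content, token_usage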