# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

from __future__ import annotations

from typing import Dict, Union, Iterable, Optional
from typing_extensions import Literal, Required, TypedDict

from .._types import SequenceNotStr
from .chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam

__all__ = ["CompletionCreateParamsBase", "CompletionCreateParamsNonStreaming", "CompletionCreateParamsStreaming"]


class CompletionCreateParamsBase(TypedDict, total=False):
    model: Required[Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]]]
    """ID of the model to use.

    You can use the
    [List models](https://platform.openai.com/docs/api-reference/models/list) API to
    see all of your available models, or see our
    [Model overview](https://platform.openai.com/docs/models) for descriptions of
    them.
    """

    prompt: Required[Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None]]
    """The prompt(s) to generate completions for, encoded as a string, array of
    strings, array of tokens, or array of token arrays.

    Note that <|endoftext|> is the document separator that the model sees during
    training, so if a prompt is not specified the model will generate as if from
    the beginning of a new document.
    """

    best_of: Optional[int]
    """Generates `best_of` completions server-side and returns the "best" (the one
    with the highest log probability per token). Results cannot be streamed.

    When used with `n`, `best_of` controls the number of candidate completions and
    `n` specifies how many to return – `best_of` must be greater than `n`.

    **Note:** Because this parameter generates many completions, it can quickly
    consume your token quota. Use carefully and ensure that you have reasonable
    settings for `max_tokens` and `stop`.
    """
    echo: Optional[bool]
    """Echo back the prompt in addition to the completion"""

    frequency_penalty: Optional[float]
    """Number between -2.0 and 2.0.

    Positive values penalize new tokens based on their existing frequency in the
    text so far, decreasing the model's likelihood to repeat the same line
    verbatim.

    [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
    """

    logit_bias: Optional[Dict[str, int]]
    """Modify the likelihood of specified tokens appearing in the completion.

    Accepts a JSON object that maps tokens (specified by their token ID in the GPT
    tokenizer) to an associated bias value from -100 to 100. You can use this
    [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
    Mathematically, the bias is added to the logits generated by the model prior
    to sampling. The exact effect will vary per model, but values between -1 and 1
    should decrease or increase likelihood of selection; values like -100 or 100
    should result in a ban or exclusive selection of the relevant token.

    As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|>
    token from being generated.
    """
    logprobs: Optional[int]
    """Include the log probabilities on the `logprobs` most likely output tokens,
    as well as the chosen tokens. For example, if `logprobs` is 5, the API will
    return a list of the 5 most likely tokens. The API will always return the
    `logprob` of the sampled token, so there may be up to `logprobs+1` elements
    in the response.

    The maximum value for `logprobs` is 5.
    """

    max_tokens: Optional[int]
    """The maximum number of [tokens](/tokenizer) that can be generated in the
    completion.

    The token count of your prompt plus `max_tokens` cannot exceed the model's
    context length.
    [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
    for counting tokens.
    """
    n: Optional[int]
    """How many completions to generate for each prompt.

    **Note:** Because this parameter generates many completions, it can quickly
    consume your token quota. Use carefully and ensure that you have reasonable
    settings for `max_tokens` and `stop`.
    """

    presence_penalty: Optional[float]
    """Number between -2.0 and 2.0.

    Positive values penalize new tokens based on whether they appear in the text
    so far, increasing the model's likelihood to talk about new topics.

    [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
    """

    seed: Optional[int]
    """If specified, our system will make a best effort to sample
    deterministically, such that repeated requests with the same `seed` and
    parameters should return the same result.

    Determinism is not guaranteed, and you should refer to the `system_fingerprint`
    response parameter to monitor changes in the backend.
    """
    stop: Union[Optional[str], SequenceNotStr[str], None]
    """Not supported with latest reasoning models `o3` and `o4-mini`.

    Up to 4 sequences where the API will stop generating further tokens. The
    returned text will not contain the stop sequence.
    """

    stream_options: Optional[ChatCompletionStreamOptionsParam]
    """Options for streaming response. Only set this when you set `stream: true`."""

    suffix: Optional[str]
    """The suffix that comes after a completion of inserted text.

    This parameter is only supported for `gpt-3.5-turbo-instruct`.
    """

    temperature: Optional[float]
    """What sampling temperature to use, between 0 and 2.

    Higher values like 0.8 will make the output more random, while lower values
    like 0.2 will make it more focused and deterministic.

    We generally recommend altering this or `top_p` but not both.
    """

    top_p: Optional[float]
    """An alternative to sampling with temperature, called nucleus sampling, where
    the model considers the results of the tokens with top_p probability mass. So
    0.1 means only the tokens comprising the top 10% probability mass are
    considered.

    We generally recommend altering this or `temperature` but not both.
    """

    user: str
    """A unique identifier representing your end-user, which can help OpenAI to
    monitor and detect abuse.
    [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
    """


class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False):
    stream: Optional[Literal[False]]
    """Whether to stream back partial progress.

    If set, tokens will be sent as data-only
    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
    as they become available, with the stream terminated by a `data: [DONE]`
    message.
    [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
    """


class CompletionCreateParamsStreaming(CompletionCreateParamsBase):
    stream: Required[Literal[True]]
    """Whether to stream back partial progress.

    If set, tokens will be sent as data-only
    [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
    as they become available, with the stream terminated by a `data: [DONE]`
    message.
    [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
    """


CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming]
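

# A minimal usage sketch, not part of the generated file. It shows how the
# parameters defined above map onto `client.completions.create(...)` keyword
# arguments; it assumes OPENAI_API_KEY is set in the environment and that the
# `gpt-3.5-turbo-instruct` model is available to your account.
def _example_usage() -> None:  # pragma: no cover - illustrative only
    from openai import OpenAI  # imported locally to avoid a circular import here

    client = OpenAI()

    # Non-streaming: the full text arrives in a single Completion object.
    completion = client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt="Say this is a test",
        max_tokens=16,
        temperature=0.2,
    )
    print(completion.choices[0].text)

    # Streaming: stream=True yields chunks as server-sent events arrive, with
    # the SDK consuming the terminating `data: [DONE]` message for you.
    stream = client.completions.create(
        model="gpt-3.5-turbo-instruct",
        prompt="Say this is a test",
        max_tokens=16,
        stream=True,
    )
    for chunk in stream:
        print(chunk.choices[0].text, end="")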