|
function Get-GPT3Completion {
    <#
        .SYNOPSIS
        Get a completion from the OpenAI GPT-3 API

        .DESCRIPTION
        Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position

        .PARAMETER prompt
        The prompt to generate completions for

        .PARAMETER model
        ID of the model to use. Defaults to 'text-davinci-003'

        .PARAMETER temperature
        The temperature used to control the model's likelihood to take risky actions. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. Defaults to 0

        .PARAMETER max_tokens
        The maximum number of tokens to generate in the completion. Defaults to 256. The prompt plus max_tokens cannot exceed the model's context length

        .PARAMETER top_p
        An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. Defaults to 1

        .PARAMETER frequency_penalty
        Number between -2.0 and 2.0 that penalizes new tokens based on their existing frequency in the text so far. Defaults to 0

        .PARAMETER presence_penalty
        Number between -2.0 and 2.0 that penalizes new tokens based on whether they appear in the text so far. Defaults to 0

        .PARAMETER stop
        Up to 4 sequences where the API will stop generating further tokens. If omitted, generation continues until max_tokens or a natural end

        .PARAMETER Raw
        Return the full API response object (id, choices, usage, ...) instead of just the text of the first choice

        .EXAMPLE
        Get-GPT3Completion -prompt "What is 2%2? - please explain"
    #>
    [CmdletBinding()]
    param(
        [Parameter(Mandatory)]
        $prompt,
        $model = 'text-davinci-003',
        $temperature = 0,
        $max_tokens = 256,
        $top_p = 1,
        $frequency_penalty = 0,
        $presence_penalty = 0,
        $stop,
        [Switch]$Raw
    )

    # The API key is read from the environment; fail fast with guidance if missing.
    if ([string]::IsNullOrEmpty($env:OpenAIKey)) {
        throw 'You must set the $env:OpenAIKey environment variable to your OpenAI API key. https://beta.openai.com/account/api-keys'
    }

    $body = [ordered]@{
        model             = $model
        prompt            = $prompt
        temperature       = $temperature
        max_tokens        = $max_tokens
        top_p             = $top_p
        frequency_penalty = $frequency_penalty
        presence_penalty  = $presence_penalty
    }

    # Only send 'stop' when the caller supplied one, so the API default applies otherwise.
    if ($null -ne $stop) {
        $body['stop'] = $stop
    }

    # UTF8-encode the JSON payload so non-ASCII prompts survive the request body.
    $json  = $body | ConvertTo-Json -Depth 5
    $bytes = [System.Text.Encoding]::UTF8.GetBytes($json)

    $params = @{
        Uri         = "https://api.openai.com/v1/completions"
        Method      = 'Post'
        Headers     = @{Authorization = "Bearer $($env:OpenAIKey)" }
        ContentType = 'application/json'
        body        = $bytes
    }

    if ($PSBoundParameters.ContainsKey('Verbose')) {
        # Always exclude Headers from the verbose dump: it carries the bearer
        # token (API key) and must never be echoed to the host for any user.
        $params |
            ConvertTo-Json -Depth 10 |
            ConvertFrom-Json |
            Select-Object * -ExcludeProperty Headers |
            Format-List |
            Out-Host
    }

    $result = Invoke-RestMethod @params

    if ($Raw) {
        # Full deserialized response object.
        $result
    }
    else {
        # Just the completion text of the first choice.
        $result.choices[0].text
    }
}
0 commit comments