value) {
- if (value == null) {
- this.stateDelta = new ConcurrentHashMap<>();
- } else {
- this.stateDelta = new ConcurrentHashMap<>(value);
+ this.stateDelta = new ConcurrentHashMap<>();
+ if (value != null) {
+ // Convert null values to State.REMOVED to avoid NPEs.
+ value
+ .entrySet()
+ .forEach(
+ entry -> {
+ stateDelta.put(
+ entry.getKey(), Optional.ofNullable(entry.getValue()).orElse(State.REMOVED));
+ });
}
return this;
}
diff --git a/core/src/main/java/com/google/adk/models/chat/ChatCompletionsCommon.java b/core/src/main/java/com/google/adk/models/chat/ChatCompletionsCommon.java
index e26546313..1ed997824 100644
--- a/core/src/main/java/com/google/adk/models/chat/ChatCompletionsCommon.java
+++ b/core/src/main/java/com/google/adk/models/chat/ChatCompletionsCommon.java
@@ -25,6 +25,7 @@
import com.google.genai.types.Part;
import java.util.Base64;
import java.util.Map;
+import java.util.Objects;
import org.jspecify.annotations.Nullable;
/** Shared models for Chat Completions Request and Response. */
@@ -45,6 +46,50 @@ private ChatCompletionsCommon() {}
public static final String METADATA_KEY_SYSTEM_FINGERPRINT = "system_fingerprint";
public static final String METADATA_KEY_SERVICE_TIER = "service_tier";
+  /**
+   * Prefix used to mark refusal content in a text Part, since there is no dedicated field for
+   * refusal content in the Gemini API.
+   */
+  static final String REFUSAL_PREFIX = "[[REFUSAL]]: ";
+
+  /**
+   * Result of splitting a text part into its non-refusal content and refusal content. Either
+   * component may be {@code null} when absent.
+   */
+  record RefusalSplit(@Nullable String content, @Nullable String refusal) {}
+
+  /**
+   * Splits a text Part value into a content portion and a refusal portion based on the {@link
+   * #REFUSAL_PREFIX} sentinel:
+   *
+   * <ul>
+   *   <li>If {@code text} starts with the prefix, the entire suffix becomes the refusal and the
+   *       content is {@code null}.
+   *   <li>If {@code text} contains {@code "\n" + REFUSAL_PREFIX} (i.e., the prefix on its own line
+   *       after some content), the text is split: everything before the newline is content,
+   *       everything after the prefix is refusal.
+   *   <li>Otherwise the text is returned as content with no refusal. The prefix is intentionally
+   *       NOT recognized mid-line without a preceding newline.
+   * </ul>
+   *
+   * @param text the raw text from a {@link Part#text()}.
+   * @return a {@link RefusalSplit} with the content and refusal portions.
+   * @throws NullPointerException if {@code text} is {@code null}.
+   */
+  static RefusalSplit parseRefusalPrefix(String text) {
+    Objects.requireNonNull(text, "text cannot be null");
+    // Whole-string refusal: the sentinel leads the text, so there is no content portion.
+    if (text.startsWith(REFUSAL_PREFIX)) {
+      return new RefusalSplit(null, text.substring(REFUSAL_PREFIX.length()));
+    }
+    // Mixed case: the sentinel only counts when it starts a later line (preceded by '\n').
+    String separator = "\n" + REFUSAL_PREFIX;
+    int index = text.indexOf(separator);
+    if (index >= 0) {
+      String before = text.substring(0, index);
+      String after = text.substring(index + separator.length());
+      // Normalize an empty content portion (text began with "\n" + prefix) to null.
+      return new RefusalSplit(before.isEmpty() ? null : before, after);
+    }
+    // No sentinel anywhere: plain content, no refusal.
+    return new RefusalSplit(text, null);
+  }
+
/**
* See
* https://developers.openai.com/api/reference/resources/chat#(resource)%20chat.completions%20%3E%20(model)%20chat_completion_message_tool_call%20%3E%20(schema)
diff --git a/core/src/main/java/com/google/adk/models/chat/ChatCompletionsHttpClient.java b/core/src/main/java/com/google/adk/models/chat/ChatCompletionsHttpClient.java
new file mode 100644
index 000000000..5b2b03a33
--- /dev/null
+++ b/core/src/main/java/com/google/adk/models/chat/ChatCompletionsHttpClient.java
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2026 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.adk.models.chat;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.adk.JsonBaseModel;
+import com.google.adk.models.LlmRequest;
+import com.google.adk.models.LlmResponse;
+import com.google.common.collect.ImmutableMap;
+import com.google.genai.types.HttpOptions;
+import io.reactivex.rxjava3.core.BackpressureStrategy;
+import io.reactivex.rxjava3.core.Flowable;
+import io.reactivex.rxjava3.core.FlowableEmitter;
+import java.io.IOException;
+import java.time.Duration;
+import java.util.Map;
+import java.util.Objects;
+import okhttp3.Call;
+import okhttp3.Callback;
+import okhttp3.HttpUrl;
+import okhttp3.MediaType;
+import okhttp3.OkHttpClient;
+import okhttp3.Request;
+import okhttp3.RequestBody;
+import okhttp3.Response;
+import okhttp3.ResponseBody;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * An HTTP client for interacting with OpenAI-compatible chat completions endpoints.
+ *
+ * Supports both non-streaming responses (single {@link LlmResponse} emission) and streaming
+ * Server-Sent Events (SSE) responses (multiple incremental {@link LlmResponse} emissions). See the
+ * OpenAI Chat Completions API
+ * reference for the wire protocol.
+ */
+public class ChatCompletionsHttpClient {
+ private static final Logger logger = LoggerFactory.getLogger(ChatCompletionsHttpClient.class);
+ private static final ObjectMapper objectMapper = JsonBaseModel.getMapper();
+
+ private static final MediaType JSON = MediaType.get("application/json; charset=utf-8");
+
+ /**
+ * Default OkHttp call timeout used when the caller does not supply an {@link HttpOptions}
+ * timeout. Five minutes is long enough for most non-streaming completions and short enough to
+ * prevent indefinite hangs in the common case where the caller does not configure timeouts.
+ * Callers who need infinite (e.g. long batch jobs or open streams) can opt in by passing an
+ * {@link HttpOptions} with {@code timeout() == 0}.
+ */
+ private static final Duration DEFAULT_CALL_TIMEOUT = Duration.ofMinutes(5);
+
+ /**
+ * Shared OkHttpClient instance whose connection pool and thread dispatcher are reused across all
+ * {@link ChatCompletionsHttpClient} instances. Each instance forks this client via {@link
+ * OkHttpClient#newBuilder()} to apply per-instance timeouts without leaking pools.
+ */
+ private static final OkHttpClient SHARED_POOL_CLIENT = new OkHttpClient();
+
+ private final OkHttpClient client;
+ private final HttpUrl completionsUrl;
+ private final ImmutableMap headers;
+
+ /**
+ * Constructs a new {@link ChatCompletionsHttpClient} that facilitates API interaction with the
+ * standard {@code /chat/completions} REST endpoint.
+ *
+ * All configuration is sourced from the supplied {@link HttpOptions}:
+ *
+ *
+ * - {@link HttpOptions#baseUrl()} -- required. The base URL of the chat completions
+ * endpoint. The {@code chat/completions} path segments are appended automatically using
+ * {@link HttpUrl}, which handles trailing slashes and percent-encoding deterministically.
+ * Set via {@code HttpOptions.builder().baseUrl("https://...").build()}.
+ *
- {@link HttpOptions#headers()} -- optional. Extra HTTP headers to include in outgoing
+ * requests. The {@code Content-Type} header is set automatically and cannot be overridden.
+ * Set via {@code HttpOptions.builder().headers(Map.of("Authorization", "Bearer ...")) }.
+ *
- {@link HttpOptions#timeout()} -- optional. Per-call timeout in milliseconds. A missing
+ * timeout defaults to 5 minutes ({@link #DEFAULT_CALL_TIMEOUT}). A timeout of {@code 0} is
+ * respected as the explicit caller opt-in to infinite wait. Set via {@code
+ * HttpOptions.builder().timeout(10_000).build()}.
+ *
+ *
+ * Example:
+ *
+ *
{@code
+ * HttpOptions options =
+ * HttpOptions.builder()
+ * .baseUrl("https://example.com/v1/")
+ * .headers(ImmutableMap.of("Authorization", "Bearer my-token"))
+ * .timeout(30_000)
+ * .build();
+ * ChatCompletionsHttpClient client = new ChatCompletionsHttpClient(options);
+ * }
+ *
+ * @param httpOptions HTTP configuration. Must not be {@code null}, and {@link
+ * HttpOptions#baseUrl()} must be present and parseable as an HTTP(S) URL.
+ * @throws IllegalArgumentException if {@code httpOptions.baseUrl()} is missing or is not a valid
+ * HTTP(S) URL.
+ */
+ public ChatCompletionsHttpClient(HttpOptions httpOptions) {
+ Objects.requireNonNull(httpOptions, "httpOptions cannot be null");
+ String baseUrl =
+ httpOptions
+ .baseUrl()
+ .orElseThrow(() -> new IllegalArgumentException("httpOptions.baseUrl() must be set"));
+ HttpUrl parsedBaseUrl = HttpUrl.parse(baseUrl);
+ if (parsedBaseUrl == null) {
+ throw new IllegalArgumentException(
+ "httpOptions.baseUrl() is not a valid HTTP(S) URL: " + baseUrl);
+ }
+ // Pre-build the completions URL once. HttpUrl.addPathSegment handles trailing slashes,
+ // percent-encoding, and existing path components on baseUrl deterministically.
+ this.completionsUrl =
+ parsedBaseUrl.newBuilder().addPathSegment("chat").addPathSegment("completions").build();
+ // Defensive copy of caller-supplied headers; absent is treated as no extra headers.
+ this.headers =
+ httpOptions
+ .headers()
+ .>map(ImmutableMap::copyOf)
+ .orElse(ImmutableMap.of());
+
+ // Apply custom timeouts per instance. All internal timeouts are bounded by callTimeout.
+ OkHttpClient.Builder builder = SHARED_POOL_CLIENT.newBuilder();
+ builder.connectTimeout(Duration.ZERO);
+ builder.readTimeout(Duration.ZERO);
+ builder.writeTimeout(Duration.ZERO);
+ builder.callTimeout(resolveCallTimeout(httpOptions));
+ this.client = builder.build();
+ }
+
+ /** Resolves the call timeout from HttpOptions. */
+ private static Duration resolveCallTimeout(HttpOptions httpOptions) {
+ if (httpOptions.timeout().isEmpty()) {
+ return DEFAULT_CALL_TIMEOUT;
+ }
+ long timeoutMs = httpOptions.timeout().get();
+ // 0 is treated as no timeout (Duration.ZERO).
+ return timeoutMs == 0L ? Duration.ZERO : Duration.ofMillis(timeoutMs);
+ }
+
+ /**
+ * Generates a conversational response from the chat completions endpoint based on the provided
+ * messages. This encapsulates building the HTTP payload, sending the request to the completions
+ * endpoint, and initiating the handling of complete calls.
+ *
+ * @param llmRequest The request containing the model, configuration, and sequence of messages.
+ * @param stream Whether to request a streaming response.
+ * @return A {@link Flowable} emitting the discrete (or combined) {@link LlmResponse} objects.
+ */
+ public Flowable complete(LlmRequest llmRequest, boolean stream) {
+ return Flowable.defer(
+ () -> {
+ ChatCompletionsRequest dtoRequest =
+ ChatCompletionsRequest.fromLlmRequest(llmRequest, stream);
+ String jsonPayload = objectMapper.writeValueAsString(dtoRequest);
+ logger.trace(
+ "Chat Completion Request: model={}, stream={}, messagesCount={}",
+ dtoRequest.model,
+ dtoRequest.stream,
+ dtoRequest.messages != null ? dtoRequest.messages.size() : 0);
+
+ Request.Builder requestBuilder =
+ new Request.Builder().url(completionsUrl).post(RequestBody.create(jsonPayload, JSON));
+
+ for (Map.Entry entry : headers.entrySet()) {
+ requestBuilder.addHeader(entry.getKey(), entry.getValue());
+ }
+ // Defensively force Content-Type to JSON by replacing instead of appending.
+ requestBuilder.header("Content-Type", JSON.toString());
+
+ Request request = requestBuilder.build();
+ if (stream) {
+ return createStreamingFlowable(request);
+ } else {
+ return createNonStreamingFlowable(request);
+ }
+ });
+ }
+
+ /** Placeholder for streaming responses. Errors with {@link UnsupportedOperationException}. */
+ @SuppressWarnings("UnusedVariable")
+ private Flowable createStreamingFlowable(Request request) {
+ return Flowable.error(
+ new UnsupportedOperationException("Streaming is not yet implemented in this client."));
+ }
+
+ /**
+ * Wraps an OkHttp {@link Callback} in a reactive {@link Flowable} for single-turn, non-streaming
+ * responses.
+ */
+ private Flowable createNonStreamingFlowable(Request request) {
+ return Flowable.create(
+ emitter -> {
+ Call call = client.newCall(request);
+ emitter.setCancellable(call::cancel);
+ call.enqueue(new NonStreamingCallback(emitter));
+ },
+ BackpressureStrategy.BUFFER);
+ }
+
+ /**
+ * Handles OkHttp failure and success callbacks, pushing {@link LlmResponse} results to the given
+ * emitter.
+ */
+ private static final class NonStreamingCallback implements Callback {
+ private final FlowableEmitter emitter;
+
+ NonStreamingCallback(FlowableEmitter emitter) {
+ this.emitter = emitter;
+ }
+
+ @Override
+ public void onFailure(Call call, IOException e) {
+ emitter.tryOnError(e);
+ }
+
+ @Override
+ public void onResponse(Call call, Response response) {
+ try (ResponseBody body = response.body()) {
+ if (!response.isSuccessful()) {
+ String bodyStr = body != null ? body.string() : "";
+ emitter.tryOnError(
+ new IOException("Unexpected code " + response + " - body: " + bodyStr));
+ return;
+ }
+ if (body == null) {
+ emitter.tryOnError(new IOException("Empty response body"));
+ return;
+ }
+
+ String jsonResponse = body.string();
+ ChatCompletionsResponse.ChatCompletion completion =
+ objectMapper.readValue(jsonResponse, ChatCompletionsResponse.ChatCompletion.class);
+ emitter.onNext(completion.toLlmResponse());
+ emitter.onComplete();
+ } catch (Exception e) {
+ emitter.tryOnError(e);
+ }
+ }
+ }
+}
diff --git a/core/src/main/java/com/google/adk/models/chat/ChatCompletionsRequest.java b/core/src/main/java/com/google/adk/models/chat/ChatCompletionsRequest.java
index 4b6747fb1..523c04a5a 100644
--- a/core/src/main/java/com/google/adk/models/chat/ChatCompletionsRequest.java
+++ b/core/src/main/java/com/google/adk/models/chat/ChatCompletionsRequest.java
@@ -21,18 +21,37 @@
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonValue;
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.adk.JsonBaseModel;
+import com.google.adk.models.LlmRequest;
+import com.google.common.collect.ImmutableList;
+import com.google.genai.types.Content;
+import com.google.genai.types.FunctionDeclaration;
+import com.google.genai.types.FunctionResponse;
+import com.google.genai.types.GenerateContentConfig;
+import com.google.genai.types.Part;
+import java.util.ArrayList;
+import java.util.Base64;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Data Transfer Objects for Chat Completion API requests.
*
+ * <p>Can be used to translate from a {@link LlmRequest} into a {@link ChatCompletionsRequest}
+ * using {@link #fromLlmRequest(LlmRequest, boolean)}.
+ *
*
 * <p>See
* https://developers.openai.com/api/reference/resources/chat/subresources/completions/methods/create
*/
@JsonIgnoreProperties(ignoreUnknown = true)
@JsonInclude(JsonInclude.Include.NON_NULL)
-final class ChatCompletionsRequest {
+public final class ChatCompletionsRequest {
/**
* See
@@ -249,6 +268,321 @@ final class ChatCompletionsRequest {
@JsonProperty("extra_body")
public Map extraBody;
+ private static final Logger logger = LoggerFactory.getLogger(ChatCompletionsRequest.class);
+ private static final ObjectMapper objectMapper = JsonBaseModel.getMapper();
+
+  /**
+   * Converts a standard {@link LlmRequest} into a {@link ChatCompletionsRequest} for
+   * /chat/completions compatible endpoints.
+   *
+   * @param llmRequest The internal source request containing contents, configuration, and tool
+   *     definitions.
+   * @param responseStreaming True if the request asks for a streaming response.
+   * @return A populated ChatCompletionsRequest ready for JSON serialization.
+   */
+  public static ChatCompletionsRequest fromLlmRequest(
+      LlmRequest llmRequest, boolean responseStreaming) {
+    ChatCompletionsRequest request = new ChatCompletionsRequest();
+    request.model = llmRequest.model().orElse("");
+    request.stream = responseStreaming;
+    if (responseStreaming) {
+      // Ask the server to append a final usage chunk so token counts survive streaming.
+      StreamOptions options = new StreamOptions();
+      options.includeUsage = true;
+      request.streamOptions = options;
+    }
+
+    // OpenAI o-series reasoning models (o1, o3, ...) use the "developer" role instead of "system".
+    boolean isOSeries = request.model.matches("^o\\d+(?:-.*)?$");
+
+    List<Message> messages = new ArrayList<>();
+
+    // The system instruction, when present, always leads the message list.
+    llmRequest
+        .config()
+        .flatMap(config -> processSystemInstruction(config, isOSeries))
+        .ifPresent(messages::add);
+
+    for (Content content : llmRequest.contents()) {
+      messages.addAll(processContent(content));
+    }
+
+    request.messages = ImmutableList.copyOf(messages);
+
+    llmRequest
+        .config()
+        .ifPresent(
+            config -> {
+              handleConfigOptions(config, request);
+              handleTools(config, request);
+            });
+
+    return request;
+  }
+
+  /**
+   * Processes the system instruction configuration and returns a mapped Message if present.
+   *
+   * @param config The content generation configuration that may contain a system instruction.
+   * @param isOSeries True if the target model belongs to the OpenAI o-series (e.g., o1, o3), which
+   *     requires the "developer" role instead of the standard "system" role.
+   * @return An Optional containing the mapped instruction, or empty if none exists.
+   */
+  private static Optional<Message> processSystemInstruction(
+      GenerateContentConfig config, boolean isOSeries) {
+    if (config.systemInstruction().isPresent()) {
+      Message systemMsg = new Message();
+      systemMsg.role = isOSeries ? "developer" : "system";
+      systemMsg.content = new MessageContent(config.systemInstruction().get().text());
+      return Optional.of(systemMsg);
+    }
+    return Optional.empty();
+  }
+
+  /**
+   * Processes incoming content and returns a list of messages resulting from it.
+   *
+   * <p>Function responses become standalone "tool" messages; everything else (text, refusals,
+   * images, tool calls) is folded into a single message carrying the content's role.
+   *
+   * @param content The incoming content containing parts to map.
+   * @return A list of mapped messages.
+   */
+  private static List<Message> processContent(Content content) {
+    Message msg = new Message();
+    String role = content.role().orElse("user");
+    // Gemini's "model" role maps to Chat Completions' "assistant" role.
+    msg.role = role.equals("model") ? "assistant" : role;
+
+    List<ContentPart> contentParts = new ArrayList<>();
+    List<ChatCompletionsCommon.ToolCall> toolCalls = new ArrayList<>();
+    List<Message> toolResponses = new ArrayList<>();
+    List<String> refusals = new ArrayList<>();
+
+    content
+        .parts()
+        .ifPresent(
+            parts -> {
+              for (Part part : parts) {
+                if (part.text().isPresent()) {
+                  // Text Parts may carry refusal content prefixed with REFUSAL_PREFIX.
+                  ChatCompletionsCommon.RefusalSplit split =
+                      ChatCompletionsCommon.parseRefusalPrefix(part.text().get());
+                  if (split.content() != null) {
+                    ContentPart textPart = new ContentPart();
+                    textPart.type = "text";
+                    textPart.text = split.content();
+                    contentParts.add(textPart);
+                  }
+                  if (split.refusal() != null) {
+                    refusals.add(split.refusal());
+                  }
+                } else if (part.inlineData().isPresent()) {
+                  contentParts.add(processInlineDataPart(part));
+                } else if (part.fileData().isPresent()) {
+                  contentParts.add(processFileDataPart(part));
+                } else if (part.functionCall().isPresent()) {
+                  toolCalls.add(processFunctionCallPart(part));
+                } else if (part.functionResponse().isPresent()) {
+                  toolResponses.add(processFunctionResponsePart(part));
+                } else if (part.executableCode().isPresent()) {
+                  logger.warn("Executable code is not supported in Chat Completion conversion");
+                } else if (part.codeExecutionResult().isPresent()) {
+                  logger.warn(
+                      "Code execution result is not supported in Chat Completion conversion");
+                }
+              }
+            });
+
+    if (!toolResponses.isEmpty()) {
+      // Tool responses are emitted as standalone "tool" messages; other part kinds are dropped.
+      return toolResponses;
+    }
+    if (!toolCalls.isEmpty()) {
+      msg.toolCalls = ImmutableList.copyOf(toolCalls);
+    }
+    if (!refusals.isEmpty()) {
+      msg.refusal = String.join("\n", refusals);
+    }
+    if (!contentParts.isEmpty()) {
+      // A lone text part collapses to the simpler string form of "content".
+      if (contentParts.size() == 1 && Objects.equals(contentParts.get(0).type, "text")) {
+        msg.content = new MessageContent(contentParts.get(0).text);
+      } else {
+        msg.content = new MessageContent(ImmutableList.copyOf(contentParts));
+      }
+    }
+    return ImmutableList.of(msg);
+  }
+
+  /**
+   * Maps a base64 inline-data part onto an {@code image_url} content part using a data: URI.
+   *
+   * @param part The input part containing base64 inline data.
+   * @return The mapped inline data part.
+   */
+  private static ContentPart processInlineDataPart(Part part) {
+    // Missing mime types default to JPEG, the most common inline image payload.
+    String mimeType = part.inlineData().get().mimeType().orElse("image/jpeg");
+    String encoded = Base64.getEncoder().encodeToString(part.inlineData().get().data().get());
+    ImageUrl imageUrl = new ImageUrl();
+    imageUrl.url = "data:" + mimeType + ";base64," + encoded;
+    ContentPart mapped = new ContentPart();
+    mapped.type = "image_url";
+    mapped.imageUrl = imageUrl;
+    return mapped;
+  }
+
+  /**
+   * Maps a file-reference part onto an {@code image_url} content part pointing at its URI.
+   *
+   * @param part The input part referencing a stored file via URI.
+   * @return The mapped file data part.
+   */
+  private static ContentPart processFileDataPart(Part part) {
+    ImageUrl imageUrl = new ImageUrl();
+    imageUrl.url = part.fileData().get().fileUri().orElse("");
+    ContentPart mapped = new ContentPart();
+    mapped.type = "image_url";
+    mapped.imageUrl = imageUrl;
+    return mapped;
+  }
+
+  /**
+   * Maps a function-call part onto the Chat Completions tool-call wire shape.
+   *
+   * @param part The input part containing a requested function call or invocation.
+   * @return The mapped function call tool call.
+   */
+  private static ChatCompletionsCommon.ToolCall processFunctionCallPart(Part part) {
+    com.google.genai.types.FunctionCall call = part.functionCall().get();
+
+    ChatCompletionsCommon.Function fn = new ChatCompletionsCommon.Function();
+    fn.name = call.name().orElse("");
+    if (call.args().isPresent()) {
+      try {
+        // Arguments travel as a JSON-encoded string on the wire.
+        fn.arguments = objectMapper.writeValueAsString(call.args().get());
+      } catch (Exception e) {
+        logger.warn("Failed to serialize function arguments", e);
+      }
+    }
+
+    ChatCompletionsCommon.ToolCall mapped = new ChatCompletionsCommon.ToolCall();
+    // Fall back to a synthetic id when the model did not supply one.
+    mapped.id = call.id().orElse("call_" + call.name().orElse("unknown"));
+    mapped.type = "function";
+    mapped.function = fn;
+    return mapped;
+  }
+
+  /**
+   * Maps a function-response part onto a {@code "tool"} role message carrying the JSON-encoded
+   * result.
+   *
+   * @param part The input part containing the execution results of a function.
+   * @return The mapped tool response message.
+   */
+  private static Message processFunctionResponsePart(Part part) {
+    FunctionResponse functionResponse = part.functionResponse().get();
+    Message message = new Message();
+    message.role = "tool";
+    message.toolCallId = functionResponse.id().orElse("");
+    functionResponse
+        .response()
+        .ifPresent(
+            response -> {
+              try {
+                message.content = new MessageContent(objectMapper.writeValueAsString(response));
+              } catch (Exception e) {
+                logger.warn("Failed to serialize tool response", e);
+              }
+            });
+    return message;
+  }
+
+ /**
+ * Updates the request based on the provided configuration options.
+ *
+ * @param config The content generation configuration containing parameters such as temperature.
+ * @param request The chat completions request to populate with matching options.
+ */
+ private static void handleConfigOptions(
+ GenerateContentConfig config, ChatCompletionsRequest request) {
+ config.temperature().ifPresent(v -> request.temperature = v.doubleValue());
+ config.topP().ifPresent(v -> request.topP = v.doubleValue());
+ config
+ .maxOutputTokens()
+ .ifPresent(
+ v -> {
+ request.maxCompletionTokens = Math.toIntExact(v);
+ });
+ config.stopSequences().ifPresent(v -> request.stop = new StopCondition(v));
+ config.candidateCount().ifPresent(v -> request.n = Math.toIntExact(v));
+ config.presencePenalty().ifPresent(v -> request.presencePenalty = v.doubleValue());
+ config.frequencyPenalty().ifPresent(v -> request.frequencyPenalty = v.doubleValue());
+ config.seed().ifPresent(v -> request.seed = v.longValue());
+
+ if (config.responseJsonSchema().isPresent()) {
+ ResponseFormatJsonSchema format = new ResponseFormatJsonSchema();
+ ResponseFormatJsonSchema.JsonSchema schema = new ResponseFormatJsonSchema.JsonSchema();
+ schema.name = "response_schema";
+ schema.schema =
+ objectMapper.convertValue(
+ config.responseJsonSchema().get(), new TypeReference