1a. If you are using Vertex AI, set up ADC to get credentials: + * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp + * + *
Then set Project, Location, and USE_VERTEXAI flag as environment variables: + * + *
export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT + * + *
export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION + * + *
export GOOGLE_GENAI_USE_VERTEXAI=true + * + *
1b. If you are using Gemini Developer API, set an API key environment variable. You can find a + * list of available API keys here: https://aistudio.google.com/app/apikey + * + *
export GOOGLE_API_KEY=YOUR_API_KEY + * + *
2. Compile the java package and run the sample code. + * + *
mvn clean compile + * + *
mvn exec:java -Dexec.mainClass="com.google.genai.examples.BatchInlinedRequests" + * -Dexec.args="YOUR_MODEL_ID" + */ +package com.google.genai.examples; + +import com.google.common.collect.ImmutableList; +import com.google.genai.Client; +import com.google.genai.types.BatchJob; +import com.google.genai.types.BatchJobSource; +import com.google.genai.types.Content; +import com.google.genai.types.CreateBatchJobConfig; +import com.google.genai.types.GenerateContentConfig; +import com.google.genai.types.InlinedRequest; +import com.google.genai.types.Part; + +/** An example of creating a batch job with inlined requests. */ +public final class BatchInlinedRequests { + + public static void main(String[] args) { + // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API + // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the + // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting + // `GOOGLE_GENAI_USE_VERTEXAI` to "true". + Client client = new Client(); + + if (client.vertexAI()) { + System.out.println("Inlined requests are not supported for Vertex AI backend."); + return; + } else { + System.out.println("Calling GeminiAPI Backend..."); + } + + InlinedRequest request1 = + InlinedRequest.builder() + .contents(Content.builder().parts(Part.fromText("Tell me a one-sentence joke."))) + .config( + GenerateContentConfig.builder() + .systemInstruction( + Content.builder() + .parts( + Part.fromText( + "You are a funny comedian. Always respond with humor and" + + " wit."))) + .temperature(0.5f)) + .build(); + + InlinedRequest request2 = + InlinedRequest.builder() + .contents(Content.builder().parts(Part.fromText("Why is the sky blue?"))) + .config( + GenerateContentConfig.builder() + .systemInstruction( + Content.builder() + .parts( + Part.fromText( + "You are a helpful science teacher. 
Explain complex concepts in" + + " simple terms."))) + .temperature(0.5f)) + .build(); + + BatchJobSource batchJobSource = + BatchJobSource.builder().inlinedRequests(ImmutableList.of(request1, request2)).build(); + + CreateBatchJobConfig config = + CreateBatchJobConfig.builder().displayName("inlined-requests-job-1").build(); + + BatchJob batchJob = + client.batches.create(Constants.GEMINI_MODEL_NAME, batchJobSource, config); + + System.out.println("Created batch job: " + batchJob.name().get()); + } +} diff --git a/examples/src/main/java/com/google/genai/examples/BatchManagement.java b/examples/src/main/java/com/google/genai/examples/BatchManagement.java new file mode 100644 index 00000000000..f200a1ee6d6 --- /dev/null +++ b/examples/src/main/java/com/google/genai/examples/BatchManagement.java @@ -0,0 +1,132 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Usage: + * + *
1a. If you are using Vertex AI, set up ADC to get credentials: + * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp + * + *
Then set Project, Location, and USE_VERTEXAI flag as environment variables: + * + *
export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT + * + *
export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION + * + *
export GOOGLE_GENAI_USE_VERTEXAI=true + * + *
1b. If you are using Gemini Developer API, set an API key environment variable. You can find a + * list of available API keys here: https://aistudio.google.com/app/apikey + * + *
export GOOGLE_API_KEY=YOUR_API_KEY + * + *
2. Compile the java package and run the sample code. + * + *
mvn clean compile + * + *
mvn exec:java -Dexec.mainClass="com.google.genai.examples.BatchManagement" + * -Dexec.args="YOUR_MODEL_ID" + */ +package com.google.genai.examples; + +import com.google.genai.Client; +import com.google.genai.types.BatchJob; +import com.google.genai.types.BatchJobDestination; +import com.google.genai.types.BatchJobSource; +import com.google.genai.types.Content; +import com.google.genai.types.CreateBatchJobConfig; +import com.google.genai.types.InlinedRequest; +import com.google.genai.types.ListBatchJobsConfig; +import com.google.genai.types.Part; + +/** An example of using the Unified Gen AI Java SDK to do operations on batch jobs. */ +public final class BatchManagement { + + public static void main(String[] args) { + final String modelId; + if (args.length != 0) { + modelId = args[0]; + } else { + modelId = Constants.GEMINI_MODEL_NAME; + } + + // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API + // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the + // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting + // `GOOGLE_GENAI_USE_VERTEXAI` to "true". + // + // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will + // get a `UnsupportedOperationException` if you try to use a service that is not available in + // the backend you are using. + Client client = new Client(); + + if (client.vertexAI()) { + System.out.println("Using Vertex AI"); + // Create a batch job. 
+ BatchJobSource batchJobSource = + BatchJobSource.builder() + .gcsUri("gs://unified-genai-tests/batches/input/generate_content_requests.jsonl") + .format("jsonl") + .build(); + CreateBatchJobConfig config = + CreateBatchJobConfig.builder() + .displayName("summarize the pdf") + .dest( + BatchJobDestination.builder() + .gcsUri("gs://unified-genai-tests/batches/output") + .format("jsonl")) + .build(); + BatchJob batchJob1 = client.batches.create(modelId, batchJobSource, config); + System.out.println("Created batch job: " + batchJob1); + // Get the batch job by name. + BatchJob batchJob2 = client.batches.get(batchJob1.name().get(), null); + System.out.println("Get batch job: " + batchJob2); + // Cancel the batch job. + client.batches.cancel(batchJob1.name().get(), null); + System.out.println("Cancelled batch job: " + batchJob1.name().get()); + } else { + System.out.println("Using Gemini Developer API"); + // Create a batch job. + BatchJobSource batchJobSource = + BatchJobSource.builder() + .inlinedRequests( + InlinedRequest.builder() + .contents(Content.builder().parts(Part.fromText("Hello!")))) + .build(); + CreateBatchJobConfig config = + CreateBatchJobConfig.builder().displayName("test-batch-job-java").build(); + BatchJob batchJob1 = client.batches.create(modelId, batchJobSource, config); + System.out.println("Created batch job: " + batchJob1); + // Get the batch job by name. + BatchJob batchJob2 = client.batches.get(batchJob1.name().get(), null); + System.out.println("Get batch job: " + batchJob2); + // Cancel the batch job. + client.batches.cancel(batchJob1.name().get(), null); + System.out.println("Cancelled batch job: " + batchJob1.name().get()); + } + + // List all batch jobs. 
+ System.out.println("List batch jobs resource names: "); + for (BatchJob b : + client.batches.list(ListBatchJobsConfig.builder().pageSize(5).build()).page()) { + System.out.println(b.name().get()); + System.out.println(b.state().get()); + } + + } + + private BatchManagement() {} +} diff --git a/examples/src/main/java/com/google/genai/examples/BatchManagementAsync.java b/examples/src/main/java/com/google/genai/examples/BatchManagementAsync.java new file mode 100644 index 00000000000..df00b9b3d15 --- /dev/null +++ b/examples/src/main/java/com/google/genai/examples/BatchManagementAsync.java @@ -0,0 +1,182 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Usage: + * + *
1a. If you are using Vertex AI, set up ADC to get credentials: + * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp + * + *
Then set Project, Location, and USE_VERTEXAI flag as environment variables: + * + *
export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT + * + *
export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION + * + *
export GOOGLE_GENAI_USE_VERTEXAI=true + * + *
1b. If you are using Gemini Developer API, set an API key environment variable. You can find a + * list of available API keys here: https://aistudio.google.com/app/apikey + * + *
export GOOGLE_API_KEY=YOUR_API_KEY + * + *
2. Compile the java package and run the sample code. + * + *
mvn clean compile + * + *
mvn exec:java -Dexec.mainClass="com.google.genai.examples.BatchManagementAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.AsyncPager;
+import com.google.genai.Client;
+import com.google.genai.types.BatchJob;
+import com.google.genai.types.BatchJobDestination;
+import com.google.genai.types.BatchJobSource;
+import com.google.genai.types.Content;
+import com.google.genai.types.CreateBatchJobConfig;
+import com.google.genai.types.InlinedRequest;
+import com.google.genai.types.ListBatchJobsConfig;
+import com.google.genai.types.Part;
+import java.util.concurrent.CompletableFuture;
+
+/** An example of using the Unified Gen AI Java SDK to do async operations on batch jobs. */
+public final class BatchManagementAsync {
+
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
+ // get a `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ // Create a batch job.
+ BatchJobSource batchJobSource =
+ BatchJobSource.builder()
+ .bigqueryUri(
+ "bq://vertex-sdk-dev.unified_genai_tests_batches.generate_content_requests")
+ .format("bigquery")
+ .build();
+ CreateBatchJobConfig config =
+ CreateBatchJobConfig.builder()
+ .displayName("test batch")
+ .dest(
+ BatchJobDestination.builder()
+ .bigqueryUri(
+ "bq://vertex-sdk-dev.unified_genai_tests_batches.generate_content_output")
+ .format("bigquery"))
+ .build();
+ CompletableFuture export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,6 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.CachedContentOperations"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -48,6 +49,7 @@
import com.google.genai.types.DeleteCachedContentResponse;
import com.google.genai.types.ListCachedContentsConfig;
import com.google.genai.types.Part;
+import com.google.genai.types.UpdateCachedContentConfig;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
@@ -60,6 +62,13 @@
public final class CachedContentOperations {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -89,12 +98,18 @@ public static void main(String[] args) {
.contents(content)
.build();
- CachedContent cachedContent1 = client.caches.create("gemini-2.0-flash-001", config);
+ CachedContent cachedContent1 = client.caches.create(modelId, config);
System.out.println("Created cached content: " + cachedContent1);
// Get the cached content by name.
CachedContent cachedContent2 = client.caches.get(cachedContent1.name().get(), null);
- System.out.println("get cached content: " + cachedContent2);
+ System.out.println("Get cached content: " + cachedContent2);
+
+ CachedContent cachedContent3 =
+ client.caches.update(
+ cachedContent1.name().get(),
+ UpdateCachedContentConfig.builder().ttl(Duration.ofMinutes(10)).build());
+ System.out.println("Update cached content: " + cachedContent3);
// List all cached contents.
System.out.println("List cached contents resource names: ");
diff --git a/examples/src/main/java/com/google/genai/examples/CachedContentOperationsAsync.java b/examples/src/main/java/com/google/genai/examples/CachedContentOperationsAsync.java
index 7db6a2c6a1b..62f16eda543 100644
--- a/examples/src/main/java/com/google/genai/examples/CachedContentOperationsAsync.java
+++ b/examples/src/main/java/com/google/genai/examples/CachedContentOperationsAsync.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,6 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.CachedContentOperationsAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -65,6 +66,13 @@
public final class CachedContentOperationsAsync {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -98,7 +106,7 @@ public static void main(String[] args) {
contentFuture.thenCompose(
content ->
client.async.caches.create(
- "gemini-2.0-flash-001",
+ modelId,
CreateCachedContentConfig.builder()
.systemInstruction(Content.fromParts(Part.fromText("summarize the pdf")))
.expireTime(Instant.now().plus(Duration.ofHours(1)))
diff --git a/examples/src/main/java/com/google/genai/examples/ChatWithFunctionCall.java b/examples/src/main/java/com/google/genai/examples/ChatWithFunctionCall.java
index c43b72897b4..618d6acb360 100644
--- a/examples/src/main/java/com/google/genai/examples/ChatWithFunctionCall.java
+++ b/examples/src/main/java/com/google/genai/examples/ChatWithFunctionCall.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -36,6 +36,7 @@
* 2. Compile the java package and run the sample code.
*
* mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.ChatWithFunctionCall"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -59,6 +60,13 @@ public static Integer divideTwoIntegers(int numerator, int denominator) {
}
public static void main(String[] args) throws NoSuchMethodException {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -85,7 +93,7 @@ public static void main(String[] args) throws NoSuchMethodException {
GenerateContentConfig.builder().tools(Tool.builder().functions(method1, method2)).build();
// Create a chat session.
- Chat chatSession = client.chats.create("gemini-2.0-flash-001", config);
+ Chat chatSession = client.chats.create(modelId, config);
GenerateContentResponse response1 =
chatSession.sendMessage("what is the weather in San Francisco?");
diff --git a/examples/src/main/java/com/google/genai/examples/ChatWithHistory.java b/examples/src/main/java/com/google/genai/examples/ChatWithHistory.java
index de3f94a0223..f79b0a38db6 100644
--- a/examples/src/main/java/com/google/genai/examples/ChatWithHistory.java
+++ b/examples/src/main/java/com/google/genai/examples/ChatWithHistory.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -36,6 +36,7 @@
* 2. Compile the java package and run the sample code.
*
* mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.ChatWithHistory"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -48,6 +49,13 @@
/** An example of using the Unified Gen AI Java SDK to create a chat session with history. */
public final class ChatWithHistory {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -65,7 +73,7 @@ public static void main(String[] args) {
}
// Create a chat session.
- Chat chatSession = client.chats.create("gemini-2.0-flash-001");
+ Chat chatSession = client.chats.create(modelId);
GenerateContentResponse response =
chatSession.sendMessage("Can you tell me a story about cheese in 100 words?");
diff --git a/examples/src/main/java/com/google/genai/examples/ChatWithHistoryAsync.java b/examples/src/main/java/com/google/genai/examples/ChatWithHistoryAsync.java
index 24901087bfa..24c5b17a71a 100644
--- a/examples/src/main/java/com/google/genai/examples/ChatWithHistoryAsync.java
+++ b/examples/src/main/java/com/google/genai/examples/ChatWithHistoryAsync.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -36,6 +36,7 @@
* 2. Compile the java package and run the sample code.
*
* mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.ChatWithHistoryAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -49,6 +50,13 @@
/** An example of using the Unified Gen AI Java SDK to create an async chat session with history. */
public final class ChatWithHistoryAsync {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -66,7 +74,7 @@ public static void main(String[] args) {
}
// Create an async chat session.
- AsyncChat chatSession = client.async.chats.create("gemini-2.0-flash-001");
+ AsyncChat chatSession = client.async.chats.create(modelId);
CompletableFuture export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,6 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.ChatWithHistoryAsyncStreaming"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -53,6 +54,13 @@
*/
public final class ChatWithHistoryAsyncStreaming {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -70,7 +78,7 @@ public static void main(String[] args) {
}
// Create an async chat session.
- AsyncChat chatSession = client.async.chats.create("gemini-2.0-flash-001");
+ AsyncChat chatSession = client.async.chats.create(modelId);
CompletableFuture 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java
+ * -Dexec.mainClass="com.google.genai.examples.ChatWithHistoryAsyncStreamingFunctionCall"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.genai.AsyncChat;
+import com.google.genai.Client;
+import com.google.genai.ResponseStream;
+import com.google.genai.types.Content;
+import com.google.genai.types.FunctionCallingConfig;
+import com.google.genai.types.FunctionResponse;
+import com.google.genai.types.GenerateContentConfig;
+import com.google.genai.types.GenerateContentResponse;
+import com.google.genai.types.Part;
+import com.google.genai.types.Tool;
+import com.google.genai.types.ToolConfig;
+import java.lang.reflect.Method;
+import java.util.concurrent.CompletableFuture;
+
+/**
+ * An example of using the Unified Gen AI Java SDK to create an async streaming chat session with
+ * history.
+ */
+public final class ChatWithHistoryAsyncStreamingFunctionCall {
+ /** A callable function to get the current weather. */
+ public static String getCurrentWeather(String location) {
+ return "The weather in " + location + " is " + "very nice.";
+ }
+
+ public static void main(String[] args) throws NoSuchMethodException {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_3_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
+ // get a `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ } else {
+ System.out.println("Gemini Developer API does not support streaming function calling.");
+ return;
+ }
+
+ // Add the methods as callable functions to the tool.
+ Method method1 =
+ ChatWithHistoryAsyncStreamingFunctionCall.class.getDeclaredMethod(
+ "getCurrentWeather", String.class);
+
+ GenerateContentConfig config =
+ GenerateContentConfig.builder()
+ .tools(Tool.builder().functions(method1))
+ .toolConfig(
+ ToolConfig.builder()
+ .functionCallingConfig(
+ FunctionCallingConfig.builder().streamFunctionCallArguments(true)))
+ .build();
+
+ AsyncChat chatSession = client.async.chats.create(modelId, config);
+
+ CompletableFuture export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,6 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.ChatWithHistoryStreaming"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -51,6 +52,13 @@
*/
public final class ChatWithHistoryStreaming {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -68,7 +76,7 @@ public static void main(String[] args) {
}
// Create a chat session.
- Chat chatSession = client.chats.create("gemini-2.0-flash-001");
+ Chat chatSession = client.chats.create(modelId);
ResponseStream 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java
+ * -Dexec.mainClass="com.google.genai.examples.ChatWithHistoryStreamingFunctionCall"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.genai.Chat;
+import com.google.genai.Client;
+import com.google.genai.ResponseStream;
+import com.google.genai.types.Content;
+import com.google.genai.types.FunctionCallingConfig;
+import com.google.genai.types.FunctionResponse;
+import com.google.genai.types.GenerateContentConfig;
+import com.google.genai.types.GenerateContentResponse;
+import com.google.genai.types.Part;
+import com.google.genai.types.Tool;
+import com.google.genai.types.ToolConfig;
+import java.lang.reflect.Method;
+
+/**
+ * An example of using the Unified Gen AI Java SDK to create a chat session and stream the response.
+ */
+public final class ChatWithHistoryStreamingFunctionCall {
+ /** A callable function to get the current weather. */
+ public static String getCurrentWeather(String location) {
+ return "The weather in " + location + " is " + "very nice.";
+ }
+
+ public static void main(String[] args) throws NoSuchMethodException {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_3_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
+ // get a `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ } else {
+ System.out.println("Gemini Developer API does not support streaming function calling.");
+ return;
+ }
+
+ // Add the methods as callable functions to the tool.
+ Method method1 =
+ ChatWithHistoryStreamingFunctionCall.class.getDeclaredMethod(
+ "getCurrentWeather", String.class);
+
+ GenerateContentConfig config =
+ GenerateContentConfig.builder()
+ .tools(Tool.builder().functions(method1))
+ .toolConfig(
+ ToolConfig.builder()
+ .functionCallingConfig(
+ FunctionCallingConfig.builder().streamFunctionCallArguments(true)))
+ .build();
+
+ // Create a chat session.
+ Chat chatSession = client.chats.create(modelId, config);
+
+ ResponseStream export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -36,6 +36,7 @@
* 2. Compile the java package and run the sample code.
*
* mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.ComputeTokens"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -45,6 +46,13 @@
/** An example of using the Unified Gen AI Java SDK to compute tokens for simple text input. */
public final class ComputeTokens {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -58,11 +66,12 @@ public static void main(String[] args) {
if (client.vertexAI()) {
System.out.println("Using Vertex AI");
} else {
- System.out.println("Using Gemini Developer API");
+ System.out.println("Gemini Developer API is not supported for this example.");
+ System.exit(0);
}
ComputeTokensResponse response =
- client.models.computeTokens("gemini-2.0-flash-001", "What is your name?", null);
+ client.models.computeTokens(modelId, "What is your name?", null);
System.out.println("Compute tokens response: " + response);
}
diff --git a/examples/src/main/java/com/google/genai/examples/Constants.java b/examples/src/main/java/com/google/genai/examples/Constants.java
new file mode 100644
index 00000000000..9bb51eb960e
--- /dev/null
+++ b/examples/src/main/java/com/google/genai/examples/Constants.java
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.genai.examples;
+
+/** Shared constants (model names and file paths) used across all examples; not instantiable. */
+public final class Constants {
+
+ private Constants() {} // Utility class: prevent instantiation.
+
+ /** The name of the generative model to be used in the examples. */
+ public static final String GEMINI_MODEL_NAME = "gemini-2.5-flash";
+
+ /** The name of the Gemini 3 model to be used in the examples. */
+ public static final String GEMINI_3_MODEL_NAME = "gemini-3-pro-preview";
+
+ /** The name of the live model to be used in the examples. */
+ public static final String GEMINI_LIVE_MODEL_NAME = "gemini-live-2.5-flash";
+
+ /** The name of the preview live model to be used in the examples. */
+ public static final String GEMINI_LIVE_MODEL_NAME_PREVIEW =
+ "gemini-2.5-flash-native-audio-preview-09-2025";
+
+ /** The name of the image generation model to be used in the examples. */
+ public static final String GEMINI_IMAGE_GENERATION_MODEL_NAME = "gemini-2.5-flash-image";
+
+ /** The name of the Imagen generate model to be used in the examples. */
+ public static final String IMAGEN_GENERATE_MODEL_NAME = "imagen-4.0-generate-001";
+
+ /** The name of the Imagen model to be used for image editing in the examples. */
+ public static final String IMAGEN_CAPABILITY_MODEL_NAME = "imagen-3.0-capability-001";
+
+ /** The name of the Imagen ingredients model to be used in the examples. */
+ public static final String IMAGEN_INGREDIENTS_MODEL_NAME = "imagen-4.0-ingredients-preview";
+
+ /** The name of the Virtual try-on model to be used in the examples. */
+ public static final String VIRTUAL_TRY_ON_MODEL_NAME = "virtual-try-on-001";
+
+ /** The name of the segment image model to be used in the examples. */
+ public static final String SEGMENT_IMAGE_MODEL_NAME = "image-segmentation-001";
+
+ /** The name of the Veo model to be used in the examples. */
+ public static final String VEO_MODEL_NAME = "veo-3.1-generate-preview";
+
+ /** The name of the embedding model to be used in the examples. */
+ public static final String EMBEDDING_MODEL_NAME = "text-embedding-004";
+
+ /** The name of the vertex multimodal embedding model to be used in the examples. */
+ public static final String VERTEX_MULTIMODAL_EMBEDDING_MODEL_NAME =
+ "gemini-embedding-2-exp-11-2025";
+
+ /** The file path used in the files-operations examples (presumably relative to the examples module's working directory — TODO confirm). */
+ public static final String UPLOAD_FILE_PATH = "./resources/test.txt";
+}
diff --git a/examples/src/main/java/com/google/genai/examples/CountTokens.java b/examples/src/main/java/com/google/genai/examples/CountTokens.java
index 9ea06ce150b..1e80883700c 100644
--- a/examples/src/main/java/com/google/genai/examples/CountTokens.java
+++ b/examples/src/main/java/com/google/genai/examples/CountTokens.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -36,6 +36,7 @@
* 2. Compile the java package and run the sample code.
*
* mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.CountTokens"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -45,6 +46,13 @@
/** An example of using the Unified Gen AI Java SDK to count tokens for simple text input. */
public final class CountTokens {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -62,7 +70,7 @@ public static void main(String[] args) {
}
CountTokensResponse response =
- client.models.countTokens("gemini-2.0-flash-001", "What is your name?", null);
+ client.models.countTokens(modelId, "What is your name?", null);
System.out.println("Count tokens response: " + response);
}
diff --git a/examples/src/main/java/com/google/genai/examples/CountTokensWithConfigs.java b/examples/src/main/java/com/google/genai/examples/CountTokensWithConfigs.java
index e43cc9399b9..1ac426d41c3 100644
--- a/examples/src/main/java/com/google/genai/examples/CountTokensWithConfigs.java
+++ b/examples/src/main/java/com/google/genai/examples/CountTokensWithConfigs.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,6 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.CountTokensWithConfigs"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -53,6 +54,13 @@
*/
public final class CountTokensWithConfigs {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -66,7 +74,10 @@ public static void main(String[] args) {
if (client.vertexAI()) {
System.out.println("Using Vertex AI");
} else {
- System.out.println("Using Gemini Developer API");
+ System.out.println(
+ "Gemini Developer API is not supported for this example since system instruction is not"
+ + " supported.");
+ System.exit(0);
}
// Sets the system instruction in the config.
@@ -78,7 +89,7 @@ public static void main(String[] args) {
.build();
CountTokensResponse response =
- client.models.countTokens("gemini-2.0-flash-001", "Tell me the history of LLM", config);
+ client.models.countTokens(modelId, "Tell me the history of LLM", config);
System.out.println("Response: " + response);
}
diff --git a/examples/src/main/java/com/google/genai/examples/EditImageAsync.java b/examples/src/main/java/com/google/genai/examples/EditImageAsync.java
index 9391e0d8fba..0761645158d 100644
--- a/examples/src/main/java/com/google/genai/examples/EditImageAsync.java
+++ b/examples/src/main/java/com/google/genai/examples/EditImageAsync.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -36,6 +36,7 @@
* 2. Compile the java package and run the sample code.
*
* mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.EditImageAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -49,14 +50,19 @@
import com.google.genai.types.MaskReferenceMode;
import com.google.genai.types.RawReferenceImage;
import com.google.genai.types.ReferenceImage;
-import java.io.IOException;
import java.util.ArrayList;
import java.util.concurrent.CompletableFuture;
-import org.apache.http.HttpException;
/** An example of using the Unified Gen AI Java SDK to edit an image asynchronously. */
public final class EditImageAsync {
- public static void main(String[] args) throws IOException, HttpException {
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.IMAGEN_CAPABILITY_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -70,7 +76,8 @@ public static void main(String[] args) throws IOException, HttpException {
if (client.vertexAI()) {
System.out.println("Using Vertex AI");
} else {
- System.out.println("Using Gemini Developer API");
+ System.out.println("Gemini Developer API is not supported for this example.");
+ System.exit(0);
}
// Base image created using generateImages with prompt:
@@ -102,10 +109,7 @@ public static void main(String[] args) throws IOException, HttpException {
CompletableFuture 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java -Dexec.mainClass="com.google.genai.examples.EditImageContentReference"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.Client;
+import com.google.genai.types.ContentReferenceImage;
+import com.google.genai.types.EditImageConfig;
+import com.google.genai.types.EditImageResponse;
+import com.google.genai.types.Image;
+import com.google.genai.types.ReferenceImage;
+import com.google.genai.types.StyleReferenceConfig;
+import com.google.genai.types.StyleReferenceImage;
+import java.util.ArrayList;
+
+/** An example of using the Unified Gen AI Java SDK to edit an image (Content reference). */
+public final class EditImageContentReference {
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.IMAGEN_INGREDIENTS_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex); you will
+ // get an `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ } else {
+ System.out.println("Gemini Developer API is not supported for this example.");
+ System.exit(0);
+ }
+
+ EditImageConfig editImageConfig =
+ EditImageConfig.builder().numberOfImages(1).outputMimeType("image/jpeg").build();
+
+ ArrayList export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,6 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.EditImageControlReference"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -54,6 +55,13 @@
/** An example of using the Unified Gen AI Java SDK to edit an image (Control reference). */
public final class EditImageControlReference {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.IMAGEN_CAPABILITY_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -67,7 +75,8 @@ public static void main(String[] args) {
if (client.vertexAI()) {
System.out.println("Using Vertex AI");
} else {
- System.out.println("Using Gemini Developer API");
+ System.out.println("Gemini Developer API is not supported for this example.");
+ System.exit(0);
}
// Base image created using generateImages with prompt:
@@ -93,7 +102,7 @@ public static void main(String[] args) {
EditImageResponse editImageResponse =
client.models.editImage(
- "imagen-3.0-capability-001",
+ modelId,
"Change the colors aligning with the scribble map [1].",
referenceImages,
editImageConfig);
diff --git a/examples/src/main/java/com/google/genai/examples/EditImageMaskReference.java b/examples/src/main/java/com/google/genai/examples/EditImageMaskReference.java
index d7d64d12caf..e0044276578 100644
--- a/examples/src/main/java/com/google/genai/examples/EditImageMaskReference.java
+++ b/examples/src/main/java/com/google/genai/examples/EditImageMaskReference.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,6 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.EditImageMaskReference"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -56,6 +57,13 @@
/** An example of using the Unified Gen AI Java SDK to edit an image (Mask reference). */
public final class EditImageMaskReference {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.IMAGEN_CAPABILITY_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -69,7 +77,8 @@ public static void main(String[] args) {
if (client.vertexAI()) {
System.out.println("Using Vertex AI");
} else {
- System.out.println("Using Gemini Developer API");
+ System.out.println("Gemini Developer API is not supported for this example.");
+ System.exit(0);
}
// Base image created using generateImages with prompt:
@@ -101,10 +110,7 @@ public static void main(String[] args) {
EditImageResponse editImageResponse =
client.models.editImage(
- "imagen-3.0-capability-001",
- "Sunlight and clear sky",
- referenceImages,
- editImageConfig);
+ modelId, "Sunlight and clear sky", referenceImages, editImageConfig);
Image editedImage = editImageResponse.generatedImages().get().get(0).image().get();
// Do something with editedImage.
diff --git a/examples/src/main/java/com/google/genai/examples/EditImageStyleTransfer.java b/examples/src/main/java/com/google/genai/examples/EditImageStyleTransfer.java
index e89667a36dc..0e69cf8754b 100644
--- a/examples/src/main/java/com/google/genai/examples/EditImageStyleTransfer.java
+++ b/examples/src/main/java/com/google/genai/examples/EditImageStyleTransfer.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,6 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.EditImageStyleTransfer"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -53,6 +54,13 @@
/** An example of using the Unified Gen AI Java SDK to edit an image (Style transfer). */
public final class EditImageStyleTransfer {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.IMAGEN_CAPABILITY_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -66,7 +74,8 @@ public static void main(String[] args) {
if (client.vertexAI()) {
System.out.println("Using Vertex AI");
} else {
- System.out.println("Using Gemini Developer API");
+ System.out.println("Gemini Developer API is not supported for this example.");
+ System.exit(0);
}
// Base image created using generateImages with prompt:
@@ -88,7 +97,7 @@ public static void main(String[] args) {
EditImageResponse editImageResponse =
client.models.editImage(
- "imagen-3.0-capability-001",
+ modelId,
"Generate an image in the style of [1] based on the following caption: A church in the"
+ " mountain.",
referenceImages,
diff --git a/examples/src/main/java/com/google/genai/examples/EditImageSubjectReference.java b/examples/src/main/java/com/google/genai/examples/EditImageSubjectReference.java
index 1ef36675738..0ba00a64f2f 100644
--- a/examples/src/main/java/com/google/genai/examples/EditImageSubjectReference.java
+++ b/examples/src/main/java/com/google/genai/examples/EditImageSubjectReference.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,6 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.EditImageSubjectReference"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -54,6 +55,13 @@
/** An example of using the Unified Gen AI Java SDK to edit an image (Subject reference). */
public final class EditImageSubjectReference {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.IMAGEN_CAPABILITY_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -67,7 +75,8 @@ public static void main(String[] args) {
if (client.vertexAI()) {
System.out.println("Using Vertex AI");
} else {
- System.out.println("Using Gemini Developer API");
+ System.out.println("Gemini Developer API is not supported for this example.");
+ System.exit(0);
}
// Base image created using generateImages with prompt:
@@ -92,7 +101,7 @@ public static void main(String[] args) {
EditImageResponse editImageResponse =
client.models.editImage(
- "imagen-3.0-capability-001",
+ modelId,
"Generate an image containing a mug with the product logo [1] visible on the side of"
+ " the mug.",
referenceImages,
diff --git a/examples/src/main/java/com/google/genai/examples/EmbedContent.java b/examples/src/main/java/com/google/genai/examples/EmbedContent.java
index 7f5e621a55c..2cf4cba131f 100644
--- a/examples/src/main/java/com/google/genai/examples/EmbedContent.java
+++ b/examples/src/main/java/com/google/genai/examples/EmbedContent.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -36,15 +36,27 @@
* 2. Compile the java package and run the sample code.
*
* mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.EmbedContent"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
import com.google.genai.Client;
+import com.google.genai.types.Content;
import com.google.genai.types.EmbedContentResponse;
+import com.google.genai.types.FileData;
+import com.google.genai.types.Part;
+import java.util.Arrays;
/** An example of using the Unified Gen AI Java SDK to embed content. */
public final class EmbedContent {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.EMBEDDING_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -62,9 +74,27 @@ public static void main(String[] args) {
}
EmbedContentResponse response =
- client.models.embedContent("text-embedding-004", "why is the sky blue?", null);
+ client.models.embedContent(modelId, "why is the sky blue?", null);
System.out.println("Embedding response: " + response);
+
+ if (client.vertexAI()) {
+ System.out.println("Embed content with GCS image example.");
+ Part textPart = Part.builder().text("What is in this image?").build();
+ Part imagePart =
+ Part.builder()
+ .fileData(
+ FileData.builder()
+ .fileUri("gs://cloud-samples-data/generative-ai/image/a-man-and-a-dog.png")
+ .mimeType("image/png")
+ .build())
+ .build();
+ Content content = Content.builder().parts(Arrays.asList(textPart, imagePart)).build();
+ response =
+ client.models.embedContent(
+ Constants.VERTEX_MULTIMODAL_EMBEDDING_MODEL_NAME, content, null);
+ System.out.println("Embedding response with GCS image: " + response);
+ }
}
private EmbedContent() {}
diff --git a/examples/src/main/java/com/google/genai/examples/EmbedContentAsync.java b/examples/src/main/java/com/google/genai/examples/EmbedContentAsync.java
index b8fe0dbb1e4..2a77865a4ff 100644
--- a/examples/src/main/java/com/google/genai/examples/EmbedContentAsync.java
+++ b/examples/src/main/java/com/google/genai/examples/EmbedContentAsync.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -36,16 +36,28 @@
* 2. Compile the java package and run the sample code.
*
* mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.EmbedContentAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
import com.google.genai.Client;
+import com.google.genai.types.Content;
import com.google.genai.types.EmbedContentResponse;
+import com.google.genai.types.FileData;
+import com.google.genai.types.Part;
+import java.util.Arrays;
import java.util.concurrent.CompletableFuture;
/** An example of using the Unified Gen AI Java SDK to embed content asynchronously. */
public final class EmbedContentAsync {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.EMBEDDING_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -63,7 +75,7 @@ public static void main(String[] args) {
}
CompletableFuture export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,6 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.EmbedContentWithConfig"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -49,6 +50,13 @@
/** An example of using the Unified Gen AI Java SDK to embed content with extra config. */
public final class EmbedContentWithConfig {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.EMBEDDING_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -69,9 +77,7 @@ public static void main(String[] args) {
EmbedContentResponse response =
client.models.embedContent(
- "text-embedding-004",
- ImmutableList.of("why is the sky blue?", "What is your age?"),
- config);
+ modelId, ImmutableList.of("why is the sky blue?", "What is your age?"), config);
System.out.println("Embedding response: " + response);
}
diff --git a/examples/src/main/java/com/google/genai/examples/FileOperations.java b/examples/src/main/java/com/google/genai/examples/FileOperations.java
index 4d1dd4c5f06..0a54cb0b1a4 100644
--- a/examples/src/main/java/com/google/genai/examples/FileOperations.java
+++ b/examples/src/main/java/com/google/genai/examples/FileOperations.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,7 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.FileOperations"
- * -Dexec.args="./resources/test.txt"
+ * -Dexec.args="path/to/file"
*/
package com.google.genai.examples;
@@ -52,12 +52,12 @@
/** An example of how to use the Files module to upload, retrieve, and delete files. */
public final class FileOperations {
public static void main(String[] args) {
-
- if (args.length == 0) {
- System.out.println("Please provide a file path on the -Dexec.args argument.");
- return;
+ final String filePath;
+ if (args.length != 0) {
+ filePath = args[0];
+ } else {
+ filePath = Constants.UPLOAD_FILE_PATH;
}
- String filePath = args[0];
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
@@ -70,7 +70,8 @@ public static void main(String[] args) {
Client client = new Client();
if (client.vertexAI()) {
- System.out.println("Using Vertex AI");
+ System.out.println("Vertex AI API is not supported for this example.");
+ System.exit(0);
} else {
System.out.println("Using Gemini Developer API");
}
diff --git a/examples/src/main/java/com/google/genai/examples/FileOperationsAsync.java b/examples/src/main/java/com/google/genai/examples/FileOperationsAsync.java
index 9d0384c1861..f35c7ed057a 100644
--- a/examples/src/main/java/com/google/genai/examples/FileOperationsAsync.java
+++ b/examples/src/main/java/com/google/genai/examples/FileOperationsAsync.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -56,12 +56,12 @@
*/
public final class FileOperationsAsync {
public static void main(String[] args) {
-
- if (args.length == 0) {
- System.out.println("Please provide a file path on the -Dexec.args argument.");
- return;
+ final String filePath;
+ if (args.length != 0) {
+ filePath = args[0];
+ } else {
+ filePath = Constants.UPLOAD_FILE_PATH;
}
- String filePath = args[0];
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
@@ -74,7 +74,8 @@ public static void main(String[] args) {
Client client = new Client();
if (client.vertexAI()) {
- System.out.println("Using Vertex AI");
+ System.out.println("Vertex AI API is not supported for this example.");
+ System.exit(0);
} else {
System.out.println("Using Gemini Developer API");
}
diff --git a/examples/src/main/java/com/google/genai/examples/FileSearchStores.java b/examples/src/main/java/com/google/genai/examples/FileSearchStores.java
new file mode 100644
index 00000000000..5a6a7b24e6f
--- /dev/null
+++ b/examples/src/main/java/com/google/genai/examples/FileSearchStores.java
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Usage:
+ *
+ * 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java -Dexec.mainClass="com.google.genai.examples.FileSearchStores"
+ * -Dexec.args="path/to/file"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.Client;
+import com.google.genai.errors.GenAiIOException;
+import com.google.genai.types.Document;
+import com.google.genai.types.File;
+import com.google.genai.types.FileSearchStore;
+import com.google.genai.types.ImportFileOperation;
+import com.google.genai.types.ListFileSearchStoresConfig;
+import com.google.genai.types.UploadFileConfig;
+import com.google.genai.types.UploadToFileSearchStoreOperation;
+
+/**
+ * An example of how to use the FileSearchStores module to upload, retrieve, and delete file search
+ * stores.
+ */
+public final class FileSearchStores {
+ public static void main(String[] args) {
+ final String filePath;
+ if (args.length != 0) {
+ filePath = args[0];
+ } else {
+ filePath = Constants.UPLOAD_FILE_PATH;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
+ // get a `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Vertex AI API is not supported for this example.");
+ System.exit(0);
+ } else {
+ System.out.println("Using Gemini Developer API");
+ }
+
+ try {
+ FileSearchStore fileSearchStore = client.fileSearchStores.create(null);
+ System.out.println("Created file store: " + fileSearchStore.name().get());
+
+ // Get the uploaded file search store.
+ FileSearchStore retrievedFileStore =
+ client.fileSearchStores.get(fileSearchStore.name().get(), null);
+ System.out.println("Retrieved file store: " + retrievedFileStore.name().get());
+
+ // List all file stores.
+ System.out.println("List file stores: ");
+ for (FileSearchStore f :
+ client.fileSearchStores.list(ListFileSearchStoresConfig.builder().pageSize(10).build())) {
+ System.out.println(" File store name: " + f.name().get());
+ }
+
+ // Upload a file to the Files Service.
+ File file =
+ client.files.upload(filePath, UploadFileConfig.builder().mimeType("text/plain").build());
+ System.out.println("Uploaded file: " + file.name().get());
+
+ // Import the uploaded file to the file search store.
+ ImportFileOperation importOperation =
+ client.fileSearchStores.importFile(fileSearchStore.name().get(), file.name().get(), null);
+ System.out.println("Import file operation: " + importOperation.name().get());
+ while (importOperation.done().filter(Boolean::booleanValue).isEmpty()) {
+ try {
+ Thread.sleep(5000); // Sleep for 5 seconds.
+ importOperation = client.operations.get(importOperation, null);
+ System.out.println("Waiting for import operation to complete...");
+ } catch (InterruptedException e) {
+ System.out.println("Thread was interrupted while sleeping.");
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ // Upload a file to the file search store.
+ UploadToFileSearchStoreOperation uploadOperation =
+ client.fileSearchStores.uploadToFileSearchStore(
+ fileSearchStore.name().get(), filePath, null);
+ System.out.println("Upload to file search store operation: " + uploadOperation.name().get());
+ while (uploadOperation.done().filter(Boolean::booleanValue).isEmpty()) {
+ try {
+ Thread.sleep(5000); // Sleep for 5 seconds.
+ uploadOperation = client.operations.get(uploadOperation, null);
+ System.out.println("Waiting for upload operation to complete...");
+ } catch (InterruptedException e) {
+ System.out.println("Thread was interrupted while sleeping.");
+ Thread.currentThread().interrupt();
+ }
+ }
+
+ String documentName = uploadOperation.response().get().documentName().get();
+ System.out.println("Uploaded document: " + documentName);
+
+ // Get document
+ Document retrievedDocument = client.fileSearchStores.documents.get(documentName, null);
+ System.out.println("Retrieved document: " + retrievedDocument.name().get());
+
+ // List documents
+ System.out.println("List documents: ");
+ for (Document d :
+ client.fileSearchStores.documents.list(fileSearchStore.name().get(), null)) {
+ System.out.println(" Document name: " + d.name().get());
+ }
+
+ // Delete the imported document
+ client.fileSearchStores.documents.delete(documentName, null);
+ System.out.println("Deleted document: " + documentName);
+
+ // Delete the file search store
+ client.fileSearchStores.delete(fileSearchStore.name().get(), null);
+ System.out.println("Deleted file: " + fileSearchStore.name().get());
+ } catch (GenAiIOException e) {
+ System.out.println("An error occurred while uploading the file: " + e.getMessage());
+ }
+ }
+
+ private FileSearchStores() {}
+}
diff --git a/examples/src/main/java/com/google/genai/examples/FileSearchStoresAsync.java b/examples/src/main/java/com/google/genai/examples/FileSearchStoresAsync.java
new file mode 100644
index 00000000000..2d2d048dedf
--- /dev/null
+++ b/examples/src/main/java/com/google/genai/examples/FileSearchStoresAsync.java
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Usage:
+ *
+ * 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java -Dexec.mainClass="com.google.genai.examples.FileSearchStoresAsync"
+ * -Dexec.args="path/to/file"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.Client;
+import com.google.genai.types.File;
+import com.google.genai.types.FileSearchStore;
+import com.google.genai.types.ListFileSearchStoresConfig;
+import com.google.genai.types.Operation;
+import com.google.genai.types.UploadFileConfig;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * An example of how to use the FileSearchStores module to upload, retrieve, and delete file search
+ * stores.
+ */
+public final class FileSearchStoresAsync {
+
+ private static export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -36,6 +36,7 @@
* 2. Compile the java package and run the sample code.
*
* mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.GenerateContent"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -45,6 +46,13 @@
/** An example of using the Unified Gen AI Java SDK to generate content. */
public final class GenerateContent {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -62,10 +70,17 @@ public static void main(String[] args) {
}
GenerateContentResponse response =
- client.models.generateContent("gemini-2.0-flash-001", "What is your name?", null);
+ client.models.generateContent(modelId, "What is your name?", null);
// Gets the text string from the response by the quick accessor method `text()`.
System.out.println("Unary response: " + response.text());
+
+ // Gets the http headers from the response.
+ response
+ .sdkHttpResponse()
+ .ifPresent(
+ httpResponse ->
+ System.out.println("Response headers: " + httpResponse.headers().orElse(null)));
}
private GenerateContent() {}
diff --git a/examples/src/main/java/com/google/genai/examples/GenerateContentAsync.java b/examples/src/main/java/com/google/genai/examples/GenerateContentAsync.java
index f50354da19b..655c99ce3f8 100644
--- a/examples/src/main/java/com/google/genai/examples/GenerateContentAsync.java
+++ b/examples/src/main/java/com/google/genai/examples/GenerateContentAsync.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -36,6 +36,7 @@
* 2. Compile the java package and run the sample code.
*
* mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.GenerateContentAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -46,6 +47,13 @@
/** An example of using the Unified Gen AI Java SDK to generate content asynchronously. */
public final class GenerateContentAsync {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -63,13 +71,20 @@ public static void main(String[] args) {
}
CompletableFuture export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -36,6 +36,7 @@
* 2. Compile the java package and run the sample code.
*
* mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.GenerateContentStream"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -46,6 +47,13 @@
/** An example of using the Unified GenAI Java SDK to generate stream of content. */
public final class GenerateContentStream {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -63,12 +71,18 @@ public static void main(String[] args) {
}
ResponseStream 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile exec:java
+ * -Dexec.mainClass="com.google.genai.examples.GenerateContentStreamingFunctionCall"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.Client;
+import com.google.genai.ResponseStream;
+import com.google.genai.types.FunctionCallingConfig;
+import com.google.genai.types.GenerateContentConfig;
+import com.google.genai.types.GenerateContentResponse;
+import com.google.genai.types.Tool;
+import com.google.genai.types.ToolConfig;
+import java.lang.reflect.Method;
+
+/** An example of using the Unified Gen AI Java SDK to generate content. */
+public final class GenerateContentStreamingFunctionCall {
+ /** A callable function to get the current weather. */
+ public static String getCurrentWeather(String location) {
+ return "The weather in " + location + " is " + "very nice.";
+ }
+
+ public static void main(String[] args) throws NoSuchMethodException {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_3_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
+ // get a `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ } else {
+ System.out.println("Gemini Developer API does not support streaming function calling.");
+ return;
+ }
+
+ // Add the methods as callable functions to the tool.
+ Method method1 =
+ GenerateContentStreamingFunctionCall.class.getDeclaredMethod(
+ "getCurrentWeather", String.class);
+
+ GenerateContentConfig config =
+ GenerateContentConfig.builder()
+ .tools(Tool.builder().functions(method1))
+ .toolConfig(
+ ToolConfig.builder()
+ .functionCallingConfig(
+ FunctionCallingConfig.builder().streamFunctionCallArguments(true)))
+ .build();
+
+ ResponseStream 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java -Dexec.mainClass="com.google.genai.examples.GenerateContentWithClientOptions"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.Client;
+import com.google.genai.types.ClientOptions;
+import com.google.genai.types.GenerateContentResponse;
+import com.google.genai.types.ProxyOptions;
+import com.google.genai.types.ProxyType;
+
+/** An example of setting client options in a GenerateContent request. */
+public final class GenerateContentWithClientOptions {
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
+ // Set the client options when creating the client. This applies to all requests made through
+ // this client.
+ ClientOptions clientOptions =
+ ClientOptions.builder()
+ .proxyOptions(ProxyOptions.builder().type(ProxyType.Known.DIRECT))
+ .maxConnections(10)
+ .maxConnectionsPerHost(5)
+ .build();
+
+ Client client = Client.builder().clientOptions(clientOptions).build();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ } else {
+ System.out.println("Using Gemini Developer API");
+ }
+
+ GenerateContentResponse response =
+ client.models.generateContent(modelId, "Tell me the history of LLM in 100 words", null);
+
+ System.out.println("Response: " + response.text());
+ }
+
+ private GenerateContentWithClientOptions() {}
+}
diff --git a/examples/src/main/java/com/google/genai/examples/GenerateContentWithConfigs.java b/examples/src/main/java/com/google/genai/examples/GenerateContentWithConfigs.java
index dd7c123121e..770bbeb5751 100644
--- a/examples/src/main/java/com/google/genai/examples/GenerateContentWithConfigs.java
+++ b/examples/src/main/java/com/google/genai/examples/GenerateContentWithConfigs.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,6 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.GenerateContentWithConfigs"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -56,6 +57,13 @@
/** An example of using the Unified Gen AI Java SDK to generate content with extra configs. */
public final class GenerateContentWithConfigs {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -100,7 +108,7 @@ public static void main(String[] args) {
.build();
GenerateContentResponse response =
- client.models.generateContent("gemini-2.0-flash-001", "Tell me the history of LLM", config);
+ client.models.generateContent(modelId, "Tell me the history of LLM", config);
System.out.println("Response: " + response.text());
}
diff --git a/examples/src/main/java/com/google/genai/examples/GenerateContentWithFunctionCall.java b/examples/src/main/java/com/google/genai/examples/GenerateContentWithFunctionCall.java
index 5e532ef4607..1efca4069de 100644
--- a/examples/src/main/java/com/google/genai/examples/GenerateContentWithFunctionCall.java
+++ b/examples/src/main/java/com/google/genai/examples/GenerateContentWithFunctionCall.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,6 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.GenerateContentWithFunctionCall"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -46,6 +47,8 @@
import com.google.genai.types.GenerateContentResponse;
import com.google.genai.types.Tool;
import java.lang.reflect.Method;
+import java.util.List;
+import java.util.ArrayList;
/** An example of using the Unified Gen AI Java SDK to generate content with function calling. */
public final class GenerateContentWithFunctionCall {
@@ -55,11 +58,26 @@ public static String getCurrentWeather(String location, String unit) {
}
/** A callable function to divide two integers. */
- public static Integer divideTwoIntegers(Integer numerator, Integer denominator) {
+ public static Integer divideTwoIntegers(int numerator, int denominator) {
return numerator / denominator;
}
+ public static Integer sumInts(List 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java
+ * -Dexec.mainClass="com.google.genai.examples.GenerateContentWithFunctionCallAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.Client;
+import com.google.genai.types.GenerateContentConfig;
+import com.google.genai.types.GenerateContentResponse;
+import com.google.genai.types.Tool;
+import java.lang.reflect.Method;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+
+/**
+ * An example of using the Unified Gen AI Java SDK to generate content with (automatic) function
+ * calling asynchronously.
+ */
+public final class GenerateContentWithFunctionCallAsync {
+ /** A callable function to get the weather. */
+ public static String getCurrentWeather(String location, String unit) {
+ return "The weather in " + location + " is " + "very nice.";
+ }
+
+ /** A callable function to divide two integers. */
+ public static Integer divideTwoIntegers(int numerator, int denominator) {
+ return numerator / denominator;
+ }
+
+ public static void main(String[] args) throws NoSuchMethodException, InterruptedException {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
+ // get a `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ } else {
+ System.out.println("Using Gemini Developer API");
+ }
+
+ // Load the two methods as reflected Method objects so that they can be automatically executed
+ // on the client side.
+ Method method1 =
+ GenerateContentWithFunctionCall.class.getMethod(
+ "getCurrentWeather", String.class, String.class);
+ Method method2 =
+ GenerateContentWithFunctionCall.class.getMethod("divideTwoIntegers", int.class, int.class);
+
+ // Add the two methods as callable functions to the list of tools.
+ GenerateContentConfig config =
+ GenerateContentConfig.builder().tools(Tool.builder().functions(method1, method2)).build();
+
+ // --- Asynchronous Call ---
+ CompletableFuture 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java -Dexec.mainClass="com.google.genai.examples.GenerateContentWithFunctionCallJson"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.genai.Client;
+import com.google.genai.types.FunctionDeclaration;
+import com.google.genai.types.GenerateContentConfig;
+import com.google.genai.types.GenerateContentResponse;
+import com.google.genai.types.Tool;
+
+/** An example of using the Unified Gen AI Java SDK to generate content with function calling. */
+public final class GenerateContentWithFunctionCallJson {
+ /** A callable function to get the weather. */
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
+ // get a `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ } else {
+ System.out.println("Using Gemini Developer API");
+ }
+
+ // Define the schema for the function declaration, in Json format.
+ ImmutableMap 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java -Dexec.mainClass="com.google.genai.examples.GenerateContentWithFunctionCallJsonString"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.Client;
+import com.google.genai.JsonSerializable;
+import com.google.genai.types.FunctionDeclaration;
+import com.google.genai.types.GenerateContentConfig;
+import com.google.genai.types.GenerateContentResponse;
+import com.google.genai.types.Tool;
+
+/** An example of using the Unified Gen AI Java SDK to generate content with function calling. */
+public final class GenerateContentWithFunctionCallJsonString {
+ /** A callable function to get the weather. */
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
+ // get a `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ } else {
+ System.out.println("Using Gemini Developer API");
+ }
+
+ // Define the schema for the function declaration, in Json format. Note if you have java 15 or
+ // above, you can use the following string block instead:
+ // String parametersSchemaString =
+ // """{
+ // "type": "object",
+ // "properties": {
+ // "location": {
+ // "type": "string"
+ // }
+ // },
+ // "required": [
+ // "location"
+ // ]
+ // }""";
+ // String responseSchemaString =
+ // """{
+ // "type": "object",
+ // "properties": {
+ // "weather": {
+ // "type": "string"
+ // }
+ // },
+ // "required": [
+ // "weather"
+ // ]
+ // }""";
+ String parametersSchemaString =
+ "{\"type\":\"object\",\"properties\":{\"location\":{\"type\":\"string\"}},\"required\":[\"location\"]}";
+ String responseSchemaString =
+ "{\"type\":\"object\",\"properties\":{\"weather\":{\"type\":\"string\"}},\"required\":[\"weather\"]}";
+
+ // Define the tool with the function declaration.
+ Tool toolWithFunctionDeclarations =
+ Tool.builder()
+ .functionDeclarations(
+ FunctionDeclaration.builder()
+ .name("get_weather")
+ .description("Returns the weather in a given location.")
+ .parametersJsonSchema(JsonSerializable.stringToJsonNode(parametersSchemaString))
+ .responseJsonSchema(JsonSerializable.stringToJsonNode(responseSchemaString))
+ .build())
+ .build();
+
+ // Add the tool to the GenerateContentConfig.
+ GenerateContentConfig config =
+ GenerateContentConfig.builder().tools(toolWithFunctionDeclarations).build();
+
+ GenerateContentResponse response =
+ client.models.generateContent(modelId, "What is the weather in Vancouver?", config);
+
+ System.out.println("The response is: " + response.functionCalls());
+ }
+
+ private GenerateContentWithFunctionCallJsonString() {}
+}
diff --git a/examples/src/main/java/com/google/genai/examples/GenerateContentWithHttpOptions.java b/examples/src/main/java/com/google/genai/examples/GenerateContentWithHttpOptions.java
new file mode 100644
index 00000000000..a45e5334e59
--- /dev/null
+++ b/examples/src/main/java/com/google/genai/examples/GenerateContentWithHttpOptions.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Usage:
+ *
+ * 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java -Dexec.mainClass="com.google.genai.examples.GenerateContentWithHttpOptions"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.Client;
+import com.google.genai.types.GenerateContentResponse;
+import com.google.genai.types.HttpOptions;
+import com.google.genai.types.HttpRetryOptions;
+
+/** An example of setting http options in a GenerateContent request. */
+public final class GenerateContentWithHttpOptions {
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
+ // Set the client level http options when creating the client. All the API requests will share
+ // the same http options.
+ HttpOptions httpOptions =
+ HttpOptions.builder()
+ .apiVersion("v1")
+ .timeout(10000)
+ .retryOptions(HttpRetryOptions.builder().attempts(3).httpStatusCodes(408, 429, 504))
+ .build();
+
+ Client client = Client.builder().httpOptions(httpOptions).build();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ } else {
+ System.out.println("Using Gemini Developer API");
+ }
+
+ GenerateContentResponse response =
+ client.models.generateContent(modelId, "Tell me the history of LLM in 100 words", null);
+
+ System.out.println("Response: " + response.text());
+ }
+
+ private GenerateContentWithHttpOptions() {}
+}
diff --git a/examples/src/main/java/com/google/genai/examples/GenerateContentWithImageInput.java b/examples/src/main/java/com/google/genai/examples/GenerateContentWithImageInput.java
index a8cd337da8d..316dbb1ccbd 100644
--- a/examples/src/main/java/com/google/genai/examples/GenerateContentWithImageInput.java
+++ b/examples/src/main/java/com/google/genai/examples/GenerateContentWithImageInput.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,6 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.GenerateContentWithImageInput"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -49,6 +50,13 @@
/** An example of using the Unified Gen AI Java SDK to generate content with image input. */
public final class GenerateContentWithImageInput {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -62,9 +70,10 @@ public static void main(String[] args) {
if (client.vertexAI()) {
System.out.println("Using Vertex AI");
} else {
- throw new IllegalArgumentException(
+ System.out.println(
"This example is not supported for Gemini Developer API since the image uri from GCS is"
+ " only supported in Vertex AI.");
+ System.exit(0);
}
Content content =
@@ -72,8 +81,7 @@ public static void main(String[] args) {
Part.fromText("describe the image"),
Part.fromUri("gs://cloud-samples-data/generative-ai/image/scones.jpg", "image/jpeg"));
- GenerateContentResponse response =
- client.models.generateContent("gemini-2.0-flash-001", content, null);
+ GenerateContentResponse response = client.models.generateContent(modelId, content, null);
System.out.println("Response: " + response.text());
}
diff --git a/examples/src/main/java/com/google/genai/examples/GenerateContentWithResponseJsonSchema.java b/examples/src/main/java/com/google/genai/examples/GenerateContentWithResponseJsonSchema.java
new file mode 100644
index 00000000000..b6ec9707d3f
--- /dev/null
+++ b/examples/src/main/java/com/google/genai/examples/GenerateContentWithResponseJsonSchema.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Usage:
+ *
+ * 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java
+ * -Dexec.mainClass="com.google.genai.examples.GenerateContentWithResponseJsonSchema"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.genai.Client;
+import com.google.genai.types.GenerateContentConfig;
+import com.google.genai.types.GenerateContentResponse;
+
+/**
+ * GenerateContentWithResponseJsonSchema generates content and returns a JSON object that
+ * conforms to the provided schema.
+ */
+public final class GenerateContentWithResponseJsonSchema {
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
+ // get a `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ } else {
+ System.out.println("Using Gemini Developer API");
+ }
+
+ ImmutableMap 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java
+ * -Dexec.mainClass="com.google.genai.examples.GenerateContentWithResponseJsonSchemaString"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.Client;
+import com.google.genai.JsonSerializable;
+import com.google.genai.types.GenerateContentConfig;
+import com.google.genai.types.GenerateContentResponse;
+
+/**
+ * GenerateContentWithResponseJsonSchemaString generates content and returns a JSON object that
+ * conforms to the schema provided as a JSON string.
+ */
+public final class GenerateContentWithResponseJsonSchemaString {
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
+ // get a `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ } else {
+ System.out.println("Using Gemini Developer API");
+ }
+ // Note if you have java 15 or above, you can use the following string block instead:
+ // String schema = """{
+ // "type": "object",
+ // "properties": {
+ // "recipe_name": {
+ // "type": "string"
+ // },
+ // "ingredients": {
+ // "type": "array",
+ // "items": {
+ // "type": "string"
+ // }
+ // }
+ // },
+ // "required": [
+ // "recipe_name",
+ // "ingredients"
+ // ]
+ // }""";
+ String schema =
+ "{\"type\":\"object\",\"properties\":{\"recipe_name\":{\"type\":\"string\"},\"ingredients\":{\"type\":\"array\",\"items\":{\"type\":\"string\"}}},\"required\":[\"recipe_name\",\"ingredients\"]}";
+
+ GenerateContentConfig config =
+ GenerateContentConfig.builder()
+ .responseMimeType("application/json")
+ .candidateCount(1)
+ .responseJsonSchema(JsonSerializable.stringToJsonNode(schema))
+ .build();
+
+ GenerateContentResponse response =
+ client.models.generateContent(modelId, "List a few popular cookie recipes.", config);
+
+ System.out.println("Response: " + response.text());
+ }
+
+ private GenerateContentWithResponseJsonSchemaString() {}
+}
diff --git a/examples/src/main/java/com/google/genai/examples/GenerateContentWithResponseModality.java b/examples/src/main/java/com/google/genai/examples/GenerateContentWithResponseModality.java
index 89b65325a64..55c5e1d08c3 100644
--- a/examples/src/main/java/com/google/genai/examples/GenerateContentWithResponseModality.java
+++ b/examples/src/main/java/com/google/genai/examples/GenerateContentWithResponseModality.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,6 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.GenerateContentWithResponseModality"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -48,6 +49,13 @@
/** An example of using the Unified Gen AI Java SDK to generate content with response modality. */
public final class GenerateContentWithResponseModality {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_IMAGE_GENERATION_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -68,10 +76,7 @@ public static void main(String[] args) {
GenerateContentConfig.builder().responseModalities("TEXT", "IMAGE").build();
GenerateContentResponse response =
- client.models.generateContent(
- "gemini-2.0-flash-preview-image-generation",
- "Generate a cat image and describe it.",
- config);
+ client.models.generateContent(modelId, "Generate a cat image and describe it.", config);
System.out.println("Response: " + response.toJson());
}
diff --git a/examples/src/main/java/com/google/genai/examples/GenerateContentWithResponseSchema.java b/examples/src/main/java/com/google/genai/examples/GenerateContentWithResponseSchema.java
index 10a1ed807fd..ea44ba2c6d2 100644
--- a/examples/src/main/java/com/google/genai/examples/GenerateContentWithResponseSchema.java
+++ b/examples/src/main/java/com/google/genai/examples/GenerateContentWithResponseSchema.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,6 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.GenerateContentWithResponseSchema"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -54,6 +55,13 @@
*/
public final class GenerateContentWithResponseSchema {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -95,8 +103,7 @@ public static void main(String[] args) {
.build();
GenerateContentResponse response =
- client.models.generateContent(
- "gemini-2.0-flash-001", "List a few popular cookie recipes.", config);
+ client.models.generateContent(modelId, "List a few popular cookie recipes.", config);
System.out.println("Response: " + response.text());
}
diff --git a/examples/src/main/java/com/google/genai/examples/GenerateImages.java b/examples/src/main/java/com/google/genai/examples/GenerateImages.java
index b050211ab68..d6704632746 100644
--- a/examples/src/main/java/com/google/genai/examples/GenerateImages.java
+++ b/examples/src/main/java/com/google/genai/examples/GenerateImages.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -36,6 +36,7 @@
* 2. Compile the java package and run the sample code.
*
* mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.GenerateImages"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -47,6 +48,13 @@
/** An example of using the Unified Gen AI Java SDK to generate images. */
public final class GenerateImages {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.IMAGEN_GENERATE_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -72,12 +80,13 @@ public static void main(String[] args) {
GenerateImagesResponse generatedImagesResponse =
client.models.generateImages(
- "imagen-3.0-generate-002", "Robot holding a red skateboard", generateImagesConfig);
+ modelId, "Robot holding a red skateboard", generateImagesConfig);
- System.out.println(
- "Generated " + generatedImagesResponse.generatedImages().get().size() + " images.");
-
- Image generatedImage = generatedImagesResponse.generatedImages().get().get(0).image().get();
+ if (generatedImagesResponse.images().isEmpty()) {
+ System.out.println("Unable to generate images.");
+ }
+ System.out.println("Generated " + generatedImagesResponse.images().size() + " images.");
+ Image generatedImage = generatedImagesResponse.images().get(0);
// Do something with the image.
System.out.println(
diff --git a/examples/src/main/java/com/google/genai/examples/GenerateImagesAsync.java b/examples/src/main/java/com/google/genai/examples/GenerateImagesAsync.java
index 4320bda8c6d..2556b5e472a 100644
--- a/examples/src/main/java/com/google/genai/examples/GenerateImagesAsync.java
+++ b/examples/src/main/java/com/google/genai/examples/GenerateImagesAsync.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -36,6 +36,7 @@
* 2. Compile the java package and run the sample code.
*
* mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.GenerateImagesAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -48,6 +49,13 @@
/** An example of using the Unified Gen AI Java SDK to generate images asynchronously. */
public final class GenerateImagesAsync {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.IMAGEN_GENERATE_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -73,7 +81,7 @@ public static void main(String[] args) {
CompletableFuture export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -36,18 +36,28 @@
* 2. Compile the java package and run the sample code.
*
* mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.GenerateVideos"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
import com.google.genai.Client;
+import com.google.genai.JsonSerializable;
import com.google.genai.errors.GenAiIOException;
import com.google.genai.types.GenerateVideosConfig;
import com.google.genai.types.GenerateVideosOperation;
+import com.google.genai.types.GenerateVideosSource;
import com.google.genai.types.Video;
/** An example of using the Unified Gen AI Java SDK to generate videos. */
public final class GenerateVideos {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.VEO_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -64,6 +74,11 @@ public static void main(String[] args) {
System.out.println("Using Gemini Developer API");
}
+    // Optional: If the default 20MB limit is not enough for the generated video response, you can
+ // increase the limit via system property `genai.json.maxReadLength` or via this static method
+ // `JsonSerializable.setMaxReadLength`.
+ JsonSerializable.setMaxReadLength(100_000_000);
+
GenerateVideosConfig.Builder generateVideosConfigBuilder =
GenerateVideosConfig.builder().numberOfVideos(1);
@@ -71,20 +86,19 @@ public static void main(String[] args) {
generateVideosConfigBuilder.outputGcsUri("gs://genai-sdk-tests/tmp/videos");
}
GenerateVideosConfig generateVideosConfig = generateVideosConfigBuilder.build();
+ GenerateVideosSource generateVideosSource =
+ GenerateVideosSource.builder()
+ .prompt("A neon hologram of a cat driving at top speed")
+ .build();
GenerateVideosOperation generateVideosOperation =
- client.models.generateVideos(
- "veo-2.0-generate-001",
- "A neon hologram of a cat driving at top speed",
- null,
- generateVideosConfig);
+ client.models.generateVideos(modelId, generateVideosSource, generateVideosConfig);
// GenerateVideosOperation.done() is empty if the operation is not done.
while (!generateVideosOperation.done().filter(Boolean::booleanValue).isPresent()) {
try {
Thread.sleep(10000); // Sleep for 10 seconds.
- generateVideosOperation =
- client.operations.getVideosOperation(generateVideosOperation, null);
+ generateVideosOperation = client.operations.get(generateVideosOperation, null);
System.out.println("Waiting for operation to complete...");
} catch (InterruptedException e) {
System.out.println("Thread was interrupted while sleeping.");
@@ -98,11 +112,14 @@ public static void main(String[] args) {
Video generatedVideo =
generateVideosOperation.response().get().generatedVideos().get().get(0).video().get();
- try {
- client.files.download(generatedVideo, "video.mp4", null);
- System.out.println("Downloaded video to video.mp4");
- } catch (GenAiIOException e) {
- System.out.println("An error occurred while downloading the video: " + e.getMessage());
+
+ if (!client.vertexAI()) {
+ try {
+ client.files.download(generatedVideo, "video.mp4", null);
+ System.out.println("Downloaded video to video.mp4");
+ } catch (GenAiIOException e) {
+ System.out.println("An error occurred while downloading the video: " + e.getMessage());
+ }
}
}
diff --git a/examples/src/main/java/com/google/genai/examples/GenerateVideosAsync.java b/examples/src/main/java/com/google/genai/examples/GenerateVideosAsync.java
index 4897db41b92..55b99cfa087 100644
--- a/examples/src/main/java/com/google/genai/examples/GenerateVideosAsync.java
+++ b/examples/src/main/java/com/google/genai/examples/GenerateVideosAsync.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -36,12 +36,15 @@
* 2. Compile the java package and run the sample code.
*
* mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.GenerateVideosAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
import com.google.genai.Client;
+import com.google.genai.JsonSerializable;
import com.google.genai.types.GenerateVideosConfig;
import com.google.genai.types.GenerateVideosOperation;
+import com.google.genai.types.GenerateVideosSource;
import com.google.genai.types.Video;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
@@ -49,6 +52,13 @@
/** An example of using the Unified Gen AI Java SDK to generate images asynchronously. */
public final class GenerateVideosAsync {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.VEO_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -65,6 +75,11 @@ public static void main(String[] args) {
System.out.println("Using Gemini Developer API");
}
+    // Optional: If the default 20MB limit is not enough for the generated video response, you can
+ // increase the limit via system property `genai.json.maxReadLength` or via this static method
+ // `JsonSerializable.setMaxReadLength`.
+ JsonSerializable.setMaxReadLength(100_000_000);
+
GenerateVideosConfig.Builder generateVideosConfigBuilder =
GenerateVideosConfig.builder().numberOfVideos(1);
@@ -72,13 +87,13 @@ public static void main(String[] args) {
generateVideosConfigBuilder.outputGcsUri("gs://genai-sdk-tests/tmp/videos");
}
GenerateVideosConfig generateVideosConfig = generateVideosConfigBuilder.build();
+ GenerateVideosSource generateVideosSource =
+ GenerateVideosSource.builder()
+ .prompt("A neon hologram of a cat driving at top speed")
+ .build();
CompletableFuture 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile exec:java
+ * -Dexec.mainClass="com.google.genai.examples.GenerateVideosEditOutpaint"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.Client;
+import com.google.genai.JsonSerializable;
+import com.google.genai.types.GenerateVideosConfig;
+import com.google.genai.types.GenerateVideosOperation;
+import com.google.genai.types.GenerateVideosSource;
+import com.google.genai.types.Image;
+import com.google.genai.types.Video;
+import com.google.genai.types.VideoGenerationMask;
+import com.google.genai.types.VideoGenerationMaskMode;
+
+/** An example of using the Unified Gen AI Java SDK to edit a video with outpaint mode. */
+public final class GenerateVideosEditOutpaint {
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = "veo-2.0-generate-preview"; // Only supported on preview model currently.
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
+ // get a `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ } else {
+ System.out.println("Gemini Developer API is not supported for this example.");
+ System.exit(0);
+ }
+
+    // Optional: If the default 20MB limit is not enough for the generated video response, you can
+ // increase the limit via system property `genai.json.maxReadLength` or via this static method
+ // `JsonSerializable.setMaxReadLength`.
+ JsonSerializable.setMaxReadLength(100_000_000);
+
+ VideoGenerationMask videoGenerationMask =
+ VideoGenerationMask.builder()
+ .image(
+ Image.builder()
+ .gcsUri("gs://genai-sdk-tests/inputs/videos/video_outpaint_mask.png")
+ .mimeType("image/png")
+ .build())
+ .maskMode(VideoGenerationMaskMode.Known.OUTPAINT)
+ .build();
+
+ GenerateVideosConfig generateVideosConfig =
+ GenerateVideosConfig.builder()
+ .numberOfVideos(1)
+ .outputGcsUri("gs://genai-sdk-tests/tmp/videos")
+ .aspectRatio("16:9")
+ .mask(videoGenerationMask)
+ .build();
+
+ GenerateVideosSource generateVideosSource =
+ GenerateVideosSource.builder()
+ .prompt("A neon hologram of a cat driving at top speed")
+ .video(
+ Video.builder()
+ .uri("gs://genai-sdk-tests/inputs/videos/editing_demo.mp4")
+ .mimeType("video/mp4")
+ .build())
+ .build();
+
+ GenerateVideosOperation generateVideosOperation =
+ client.models.generateVideos(modelId, generateVideosSource, generateVideosConfig);
+
+ // GenerateVideosOperation.done() is empty if the operation is not done.
+ while (!generateVideosOperation.done().filter(Boolean::booleanValue).isPresent()) {
+ try {
+ Thread.sleep(10000); // Sleep for 10 seconds.
+ generateVideosOperation =
+ client.operations.getVideosOperation(generateVideosOperation, null);
+ System.out.println("Waiting for operation to complete...");
+ } catch (InterruptedException e) {
+ System.out.println("Thread was interrupted while sleeping.");
+ Thread.currentThread().interrupt();
+ }
+ }
+ System.out.println(
+ "Generated "
+ + generateVideosOperation.response().get().generatedVideos().get().size()
+ + " video(s).");
+
+ Video generatedVideo =
+ generateVideosOperation.response().get().generatedVideos().get().get(0).video().get();
+ }
+
+ private GenerateVideosEditOutpaint() {}
+}
diff --git a/examples/src/main/java/com/google/genai/examples/GenerateVideosExtension.java b/examples/src/main/java/com/google/genai/examples/GenerateVideosExtension.java
new file mode 100644
index 00000000000..869e1d53ad4
--- /dev/null
+++ b/examples/src/main/java/com/google/genai/examples/GenerateVideosExtension.java
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Usage:
+ *
+ * 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile exec:java
+ * -Dexec.mainClass="com.google.genai.examples.GenerateVideosExtension" -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.Client;
+import com.google.genai.JsonSerializable;
+import com.google.genai.errors.GenAiIOException;
+import com.google.genai.types.GenerateVideosConfig;
+import com.google.genai.types.GenerateVideosOperation;
+import com.google.genai.types.GenerateVideosSource;
+import com.google.genai.types.Video;
+
+/** An example of using the Unified Gen AI Java SDK to extend a video. */
+public final class GenerateVideosExtension {
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.VEO_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
 + // get an `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Sample is only available for Gemini Developer API.");
+ return;
+ } else {
+ System.out.println("Using Gemini Developer API");
+ }
+
+ // Optional: If the default 20MB limit is not enough for the generated video response, you can
+ // increase the limit via system property `genai.json.maxReadLength` or via this static method
+ // `JsonSerializable.setMaxReadLength`.
+ JsonSerializable.setMaxReadLength(100_000_000);
+
+ // Generate first video.
+ GenerateVideosConfig generateVideosConfig =
+ GenerateVideosConfig.builder().numberOfVideos(1).build();
+ GenerateVideosSource generateVideosSource =
+ GenerateVideosSource.builder()
+ .prompt("A neon hologram of a cat driving at top speed")
+ .build();
+
+ GenerateVideosOperation generateVideosOperation1 =
+ client.models.generateVideos(modelId, generateVideosSource, generateVideosConfig);
+
+ // GenerateVideosOperation.done() is empty if the operation is not done.
+ while (!generateVideosOperation1.done().filter(Boolean::booleanValue).isPresent()) {
+ try {
+ Thread.sleep(10000); // Sleep for 10 seconds.
+ generateVideosOperation1 =
+ client.operations.getVideosOperation(generateVideosOperation1, null);
+ System.out.println("Waiting for operation to complete...");
+ } catch (InterruptedException e) {
+ System.out.println("Thread was interrupted while sleeping.");
+ Thread.currentThread().interrupt();
+ }
+ }
+ System.out.println(
+ "Generated "
+ + generateVideosOperation1.response().get().generatedVideos().get().size()
+ + " video(s).");
+
+ Video generatedVideo1 =
+ generateVideosOperation1.response().get().generatedVideos().get().get(0).video().get();
+
+ if (!client.vertexAI()) {
+ try {
+ client.files.download(generatedVideo1, "video.mp4", null);
+ System.out.println("Downloaded video to video.mp4");
+ } catch (GenAiIOException e) {
+ System.out.println("An error occurred while downloading the video: " + e.getMessage());
+ }
+ }
+
+ // Extend the generated video.
+ GenerateVideosConfig generateVideosConfig2 =
+ GenerateVideosConfig.builder().numberOfVideos(1).build();
+ GenerateVideosSource generateVideosSource2 =
+ GenerateVideosSource.builder().prompt("Rain").video(generatedVideo1).build();
+
+ GenerateVideosOperation generateVideosOperation2 =
+ client.models.generateVideos(modelId, generateVideosSource2, generateVideosConfig2);
+
+ // GenerateVideosOperation.done() is empty if the operation is not done.
+ while (!generateVideosOperation2.done().filter(Boolean::booleanValue).isPresent()) {
+ try {
+ Thread.sleep(10000); // Sleep for 10 seconds.
+ generateVideosOperation2 =
+ client.operations.getVideosOperation(generateVideosOperation2, null);
+ System.out.println("Waiting for operation to complete...");
+ } catch (InterruptedException e) {
+ System.out.println("Thread was interrupted while sleeping.");
+ Thread.currentThread().interrupt();
+ }
+ }
+ System.out.println(
+ "Generated "
+ + generateVideosOperation2.response().get().generatedVideos().get().size()
+ + " video(s).");
+
+ Video generatedVideo2 =
+ generateVideosOperation2.response().get().generatedVideos().get().get(0).video().get();
+
+ if (!client.vertexAI()) {
+ try {
+ client.files.download(generatedVideo2, "video.mp4", null);
+ System.out.println("Downloaded extended video to video.mp4");
+ } catch (GenAiIOException e) {
+ System.out.println("An error occurred while downloading the video: " + e.getMessage());
+ }
+ }
+ }
+
+ private GenerateVideosExtension() {}
+}
diff --git a/examples/src/main/java/com/google/genai/examples/HttpOptionsExtraBody.java b/examples/src/main/java/com/google/genai/examples/HttpOptionsExtraBody.java
new file mode 100644
index 00000000000..c4fecf6ba01
--- /dev/null
+++ b/examples/src/main/java/com/google/genai/examples/HttpOptionsExtraBody.java
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Usage:
+ *
+ * 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.HttpOptionsExtraBody"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.genai.Client;
+import com.google.genai.types.GenerateContentConfig;
+import com.google.genai.types.GenerateContentResponse;
+import com.google.genai.types.HttpOptions;
+
+/**
 + * An example of using HttpOptions extraBody to inject additional parameters into the HTTP request body.
+ */
+public final class HttpOptionsExtraBody {
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API.
+ // It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used
+ // by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as
+ // well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or
+ // Vertex), you will
 + // get an `UnsupportedOperationException` if you try to use a service that is not
+ // available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ } else {
+ System.out.println("Using Gemini Developer API");
+ }
+
+ GenerateContentResponse response =
+ client.models.generateContent(
+ modelId,
+ "What is your name?",
+ GenerateContentConfig.builder()
+ .httpOptions(
+ HttpOptions.builder()
+ .extraBody(
+ ImmutableMap.of(
+ "systemInstruction",
+ ImmutableMap.of(
+ "parts",
+ ImmutableList.of(
+ ImmutableMap.of("text", "You are a chatbot.")))))
+ .build())
+ .build());
+
+ System.out.println(
+ "GenerateContent prompt token count: " + response.usageMetadata().get().promptTokenCount());
+ }
+
+ private HttpOptionsExtraBody() {}
+}
diff --git a/examples/src/main/java/com/google/genai/examples/LiveAudioConversationAsync.java b/examples/src/main/java/com/google/genai/examples/LiveAudioConversationAsync.java
index 619f84f4291..e1a6c82e0a9 100644
--- a/examples/src/main/java/com/google/genai/examples/LiveAudioConversationAsync.java
+++ b/examples/src/main/java/com/google/genai/examples/LiveAudioConversationAsync.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -39,6 +39,7 @@
* mvn clean
*
* mvn compile exec:java -Dexec.mainClass="com.google.genai.examples.LiveAudioConversationAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
*
* 3. Speak into the microphone. Press Ctrl+C to exit. Important: This example uses the system
* default audio input and output, which often won't include echo cancellation. So to prevent the
@@ -46,17 +47,22 @@
*/
package com.google.genai.examples;
-import com.google.common.collect.ImmutableList;
import com.google.genai.AsyncSession;
import com.google.genai.Client;
import com.google.genai.types.Blob;
+import com.google.genai.types.AutomaticActivityDetection;
+import com.google.genai.types.EndSensitivity;
import com.google.genai.types.LiveConnectConfig;
import com.google.genai.types.LiveSendRealtimeInputParameters;
import com.google.genai.types.LiveServerMessage;
import com.google.genai.types.Modality;
+import com.google.genai.types.RealtimeInputConfig;
import com.google.genai.types.PrebuiltVoiceConfig;
import com.google.genai.types.SpeechConfig;
+import com.google.genai.types.StartSensitivity;
import com.google.genai.types.VoiceConfig;
+import java.util.Collection;
+import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
@@ -84,7 +90,6 @@ public final class LiveAudioConversationAsync {
// --------------------------
private static volatile boolean running = true;
- private static volatile boolean speakerPlaying = false;
private static TargetDataLine microphoneLine;
private static SourceDataLine speakerLine;
private static AsyncSession session;
@@ -111,8 +116,7 @@ private static void sendMicrophoneAudio() {
while (running && microphoneLine != null && microphoneLine.isOpen()) {
bytesRead = microphoneLine.read(buffer, 0, buffer.length);
- if (bytesRead > 0 && !speakerPlaying) {
- // Create a copy of the buffer with the actual bytes read
+ if (bytesRead > 0) {
byte[] audioChunk = new byte[bytesRead];
System.arraycopy(buffer, 0, audioChunk, 0, bytesRead);
@@ -135,7 +139,6 @@ private static void sendMicrophoneAudio() {
}
public static void main(String[] args) throws LineUnavailableException {
-
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -152,6 +155,15 @@ public static void main(String[] args) throws LineUnavailableException {
System.out.println("Using Gemini Developer API");
}
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else if (client.vertexAI()) {
+ modelId = Constants.GEMINI_LIVE_MODEL_NAME;
+ } else {
+ modelId = Constants.GEMINI_LIVE_MODEL_NAME_PREVIEW;
+ }
+
// --- Audio Line Setup ---
microphoneLine = getMicrophoneLine();
speakerLine = getSpeakerLine();
@@ -161,7 +173,7 @@ public static void main(String[] args) throws LineUnavailableException {
String voiceName = "Aoede";
LiveConnectConfig config =
LiveConnectConfig.builder()
- .responseModalitiesFromKnown(ImmutableList.of(Modality.Known.AUDIO))
+ .responseModalities(Modality.Known.AUDIO)
.speechConfig(
SpeechConfig.builder()
.voiceConfig(
@@ -169,6 +181,14 @@ public static void main(String[] args) throws LineUnavailableException {
.prebuiltVoiceConfig(
PrebuiltVoiceConfig.builder().voiceName(voiceName)))
.languageCode("en-US"))
+ .realtimeInputConfig(
+ RealtimeInputConfig.builder()
+ .automaticActivityDetection(
+ AutomaticActivityDetection.builder()
+ .startOfSpeechSensitivity(StartSensitivity.Known.START_SENSITIVITY_HIGH)
+ .endOfSpeechSensitivity(EndSensitivity.Known.END_SENSITIVITY_HIGH)
+ .prefixPaddingMs(5)
+ .silenceDurationMs(100)))
.build();
// --- Shutdown Hook for Cleanup ---
@@ -209,11 +229,7 @@ public static void main(String[] args) throws LineUnavailableException {
// --- Connect to Gemini Live API ---
System.out.println("Connecting to Gemini Live API...");
- if (client.vertexAI()) {
- session = client.async.live.connect("gemini-2.0-flash-live-preview-04-09", config).get();
- } else {
- session = client.async.live.connect("gemini-2.0-flash-live-001", config).get();
- }
+ session = client.async.live.connect(modelId, config).get();
System.out.println("Connected.");
// --- Start Audio Lines ---
@@ -295,29 +311,35 @@ public static void handleAudioResponse(LiveServerMessage message) {
.serverContent()
.ifPresent(
content -> {
+ // Handle interruptions from Gemini.
+ if (content.interrupted().orElse(false)) {
+ speakerLine.flush();
+ return; // Skip processing the rest of this message's audio.
+ }
+
+ // Handle Model turn completion.
if (content.turnComplete().orElse(false)) {
- // when interrupted, Gemini sends a turn_compete with
- // Stop the speaker if the turn is complete
- if (speakerLine != null && speakerLine.isOpen()) {
- speakerLine.flush();
- }
- } else {
- content
- .modelTurn()
- .flatMap(modelTurn -> modelTurn.parts())
- .ifPresent(
- parts ->
- parts.forEach(
- part ->
- part.inlineData()
- .flatMap(Blob::data)
- .ifPresent(
- audioBytes -> {
- if (speakerLine != null && speakerLine.isOpen()) {
- // Write audio data to the speaker
- speakerLine.write(audioBytes, 0, audioBytes.length);
- }
- })));
+ // The turn is over, no more audio will be sent for this turn.
+ return;
+ }
+
+ // Process audio content for playback.
+ content.modelTurn().stream()
+ .flatMap(modelTurn -> modelTurn.parts().stream())
+ .flatMap(Collection::stream)
+ .map(part -> part.inlineData().flatMap(Blob::data))
+ .flatMap(Optional::stream)
+ .forEach(
+ audioBytes -> {
+ if (speakerLine != null && speakerLine.isOpen()) {
+ // Write audio data to the speaker
+ speakerLine.write(audioBytes, 0, audioBytes.length);
+ }
+ });
+
+ // If this is the last message of a generation, drain the buffer.
+ if (content.generationComplete().orElse(false)) {
+ speakerLine.drain();
}
});
}
diff --git a/examples/src/main/java/com/google/genai/examples/LiveEphemeralTokenAsync.java b/examples/src/main/java/com/google/genai/examples/LiveEphemeralTokenAsync.java
new file mode 100644
index 00000000000..b5c302e2cf2
--- /dev/null
+++ b/examples/src/main/java/com/google/genai/examples/LiveEphemeralTokenAsync.java
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Usage:
+ *
+ * 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java -Dexec.mainClass="com.google.genai.examples.LiveEphemeralTokenAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.common.collect.ImmutableList;
+import com.google.genai.AsyncSession;
+import com.google.genai.Client;
+import com.google.genai.types.AuthToken;
+import com.google.genai.types.Content;
+import com.google.genai.types.CreateAuthTokenConfig;
+import com.google.genai.types.HttpOptions;
+import com.google.genai.types.LiveConnectConfig;
+import com.google.genai.types.LiveConnectConstraints;
+import com.google.genai.types.LiveSendClientContentParameters;
+import com.google.genai.types.LiveServerContent;
+import com.google.genai.types.LiveServerMessage;
+import com.google.genai.types.Modality;
+import com.google.genai.types.Part;
+import java.util.concurrent.CompletableFuture;
+
+/** Example of using the live module to send and receive text messages asynchronously. */
+public final class LiveEphemeralTokenAsync {
+
+ public static void main(String[] args) {
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
 + // get an `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client =
+ Client.builder().httpOptions(HttpOptions.builder().apiVersion("v1alpha").build()).build();
+
+ if (client.vertexAI()) {
+ System.out.println("Vertex AI API is not supported for this example.");
+ System.exit(0);
+ } else {
+ System.out.println("Using Gemini Developer API");
+ }
+ System.out.println("Creating auth token...");
+
+ // Create an auth token for the live session.
+ AuthToken authToken =
+ client.authTokens.create(
+ CreateAuthTokenConfig.builder()
+ .uses(2)
+ .liveConnectConstraints(
+ LiveConnectConstraints.builder()
+ .model(Constants.GEMINI_LIVE_MODEL_NAME_PREVIEW)
+ .config(
+ LiveConnectConfig.builder()
+ .systemInstruction(
+ Content.fromParts(
+ Part.fromText(
+ "Answer questions like C-3PO from Star Wars would.")))
+ .responseModalities(Modality.Known.AUDIO)
+ .build())
+ .build())
+ .lockAdditionalFields(ImmutableList.of("topP"))
+ .build());
+ System.out.println("Created auth token: " + authToken.name());
+
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_LIVE_MODEL_NAME_PREVIEW;
+ }
+
+ // Create a client using the ephemeral auth token.
+ if (authToken == null || authToken.name() == null) {
+ System.out.println("No auth token created.");
+ System.exit(0);
+ }
+ Client clientWithAuthToken =
+ Client.builder()
+ .apiKey(authToken.name().orElse(null))
+ .httpOptions(HttpOptions.builder().apiVersion("v1alpha").build())
+ .build();
+
 + // Note that the system instruction here is ignored by the server. The system instruction was
 + // set and locked in the LiveConnectConstraints of the CreateAuthTokenConfig; we are just
 + // demonstrating that here. Other unlocked fields (like temperature) can be configured.
+ LiveConnectConfig config =
+ LiveConnectConfig.builder()
+ .systemInstruction(
+ Content.fromParts(
+ Part.fromText("You are a pirate. Answer all questions like a pirate would.")))
+ .build();
+
+ CompletableFuture export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -39,6 +39,7 @@
*
* mvn exec:java
* -Dexec.mainClass="com.google.genai.examples.LiveTextContextWindowCompressionAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -76,9 +77,19 @@ public static void main(String[] args) {
System.out.println("Using Gemini Developer API");
}
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else if (client.vertexAI()) {
+ modelId = Constants.GEMINI_LIVE_MODEL_NAME;
+ } else {
+ modelId = Constants.GEMINI_LIVE_MODEL_NAME_PREVIEW;
+ }
+
+ // Configures live session and context window compression.
LiveConnectConfig config =
LiveConnectConfig.builder()
- .responseModalitiesFromKnown(ImmutableList.of(Modality.Known.TEXT))
+ .responseModalities(Modality.Known.TEXT)
.contextWindowCompression(
ContextWindowCompressionConfig.builder()
.triggerTokens(1000L)
@@ -87,15 +98,7 @@ public static void main(String[] args) {
CompletableFuture export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,10 +38,10 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.LiveTextConversationAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
-import com.google.common.collect.ImmutableList;
import com.google.genai.AsyncSession;
import com.google.genai.Client;
import com.google.genai.types.Content;
@@ -75,9 +75,18 @@ public static void main(String[] args) {
System.out.println("Using Gemini Developer API");
}
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else if (client.vertexAI()) {
+ modelId = Constants.GEMINI_LIVE_MODEL_NAME;
+ } else {
+ modelId = Constants.GEMINI_LIVE_MODEL_NAME_PREVIEW;
+ }
+
LiveConnectConfig config =
LiveConnectConfig.builder()
- .responseModalitiesFromKnown(ImmutableList.of(Modality.Known.TEXT))
+ .responseModalities(Modality.Known.TEXT)
.topP(0.8f)
.seed(1234)
.build();
@@ -87,11 +96,7 @@ public static void main(String[] args) {
AsyncSession session;
try {
- if (client.vertexAI()) {
- session = client.async.live.connect("gemini-2.0-flash-live-preview-04-09", config).get();
- } else {
- session = client.async.live.connect("gemini-2.0-flash-live-001", config).get();
- }
+ session = client.async.live.connect(modelId, config).get();
// Start receiving messages.
CompletableFuture export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,6 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.LiveTextConversationResumptionAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
*
* to resume a session, you can use the --session_handle argument to provide the session handle
* returned in the session resumption update from the server.
@@ -45,11 +46,10 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.LiveTextConversationResumptionAsync"
- * -Dexec.args="--session_handle=..."
+ * -Dexec.args="YOUR_MODEL_ID --session_handle=..."
*/
package com.google.genai.examples;
-import com.google.common.collect.ImmutableList;
import com.google.genai.AsyncSession;
import com.google.genai.Client;
import com.google.genai.types.Content;
@@ -62,6 +62,7 @@
import com.google.genai.types.Part;
import com.google.genai.types.SessionResumptionConfig;
import java.io.Console;
+import java.util.Collection;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
@@ -71,7 +72,18 @@ public final class LiveTextConversationResumptionAsync {
public static void main(String[] args) {
// Get the session handle from the command line, if provided
String sessionHandle = null;
- if (args.length > 0) {
+ if (args.length > 1) {
+ if (args[1].startsWith("--session_handle")) {
+ String[] parts = args[1].split("=", 2);
+ if (parts.length == 2) {
+ sessionHandle = parts[1];
+ } else if (parts.length == 1) {
+ System.err.println("Error: --session_handle requires a value.");
+ System.err.println("Usage: mvn ... --session_handle= export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,18 +38,15 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.LiveTextToAudioTranscriptionAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
-import com.google.common.collect.ImmutableList;
import com.google.genai.AsyncSession;
import com.google.genai.Client;
import com.google.genai.types.AudioTranscriptionConfig;
import com.google.genai.types.Content;
import com.google.genai.types.GoogleSearch;
-import com.google.genai.types.GroundingChunk;
-import com.google.genai.types.GroundingChunkWeb;
-import com.google.genai.types.GroundingMetadata;
import com.google.genai.types.LiveConnectConfig;
import com.google.genai.types.LiveSendClientContentParameters;
import com.google.genai.types.LiveServerContent;
@@ -59,7 +56,7 @@
import com.google.genai.types.SpeechConfig;
import com.google.genai.types.Tool;
import com.google.genai.types.Transcription;
-import java.util.List;
+import java.util.Collection;
import java.util.Optional;
import java.util.concurrent.CompletableFuture;
@@ -83,14 +80,24 @@ public static void main(String[] args) {
System.out.println("Using Gemini Developer API");
}
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else if (client.vertexAI()) {
+ modelId = Constants.GEMINI_LIVE_MODEL_NAME;
+ } else {
+ modelId = Constants.GEMINI_LIVE_MODEL_NAME_PREVIEW;
+ }
+
// Sets the system instruction in the config.
Content systemInstruction = Content.fromParts(Part.fromText("Answer in Japanese."));
// Sets the Google Search tool in the config.
Tool googleSearchTool = Tool.builder().googleSearch(GoogleSearch.builder()).build();
+ // Configures live session and audio transcription.
LiveConnectConfig config =
LiveConnectConfig.builder()
- .responseModalitiesFromKnown(ImmutableList.of(Modality.Known.AUDIO))
+ .responseModalities(Modality.Known.AUDIO)
.outputAudioTranscription(AudioTranscriptionConfig.builder())
.systemInstruction(systemInstruction)
.speechConfig(SpeechConfig.builder().languageCode("ja-JP"))
@@ -99,14 +106,7 @@ public static void main(String[] args) {
CompletableFuture export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,10 +38,10 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.LiveTextToTextGenerationAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
-import com.google.common.collect.ImmutableList;
import com.google.genai.AsyncSession;
import com.google.genai.Client;
import com.google.genai.types.Content;
@@ -73,26 +73,28 @@ public static void main(String[] args) {
System.out.println("Using Gemini Developer API");
}
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else if (client.vertexAI()) {
+ modelId = Constants.GEMINI_LIVE_MODEL_NAME;
+ } else {
+ modelId = Constants.GEMINI_LIVE_MODEL_NAME_PREVIEW;
+ }
+
LiveConnectConfig config =
- LiveConnectConfig.builder()
- .responseModalitiesFromKnown(ImmutableList.of(Modality.Known.TEXT))
- .build();
+ LiveConnectConfig.builder().responseModalities(Modality.Known.TEXT).build();
CompletableFuture 1. Compile the java package and run the sample code.
+ *
+ * mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.LocalComputeTokens"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.LocalTokenizer;
+
+/** An example of using the Unified Gen AI Java SDK to compute tokens locally. */
+public class LocalComputeTokens {
+ public static void main(String[] args) {
+ LocalTokenizer tokenizer = new LocalTokenizer(Constants.GEMINI_MODEL_NAME);
+ System.out.println(
+ "Compute tokens for 'Hello world': " + tokenizer.computeTokens("Hello world").toJson());
+ }
+}
diff --git a/examples/src/main/java/com/google/genai/examples/LocalCountTokens.java b/examples/src/main/java/com/google/genai/examples/LocalCountTokens.java
new file mode 100644
index 00000000000..9e1c0fc425b
--- /dev/null
+++ b/examples/src/main/java/com/google/genai/examples/LocalCountTokens.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Usage:
+ *
+ * 1. Compile the java package and run the sample code.
+ *
+ * mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.LocalCountTokens"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.LocalTokenizer;
+
+/** An example of using the Unified Gen AI Java SDK to count tokens locally. */
+public class LocalCountTokens {
+ public static void main(String[] args) {
+ LocalTokenizer tokenizer = new LocalTokenizer(Constants.GEMINI_MODEL_NAME);
+ System.out.println(
+ "Count for 'Hello world': " + tokenizer.countTokens("Hello world").totalTokens());
+ }
+}
diff --git a/examples/src/main/java/com/google/genai/examples/ModelManagement.java b/examples/src/main/java/com/google/genai/examples/ModelManagement.java
index d10595fd45c..0c51973a64e 100644
--- a/examples/src/main/java/com/google/genai/examples/ModelManagement.java
+++ b/examples/src/main/java/com/google/genai/examples/ModelManagement.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -49,13 +49,13 @@
public final class ModelManagement {
public static void main(String[] args) {
- if (args.length == 0) {
- System.out.println("Please provide a model ID on the -Dexec.args argument.");
- return;
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
}
- String modelId = args[0];
-
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
diff --git a/examples/src/main/java/com/google/genai/examples/ModelManagementAsync.java b/examples/src/main/java/com/google/genai/examples/ModelManagementAsync.java
index 08965e3cdd9..a310cc28d68 100644
--- a/examples/src/main/java/com/google/genai/examples/ModelManagementAsync.java
+++ b/examples/src/main/java/com/google/genai/examples/ModelManagementAsync.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -51,13 +51,13 @@
public final class ModelManagementAsync {
public static void main(String[] args) {
- if (args.length == 0) {
- System.out.println("Please provide a model ID on the -Dexec.args argument.");
- return;
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
}
- String modelId = args[0];
-
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
diff --git a/examples/src/main/java/com/google/genai/examples/RecontextImageVirtualTryOn.java b/examples/src/main/java/com/google/genai/examples/RecontextImageVirtualTryOn.java
new file mode 100644
index 00000000000..28e12fb1ccc
--- /dev/null
+++ b/examples/src/main/java/com/google/genai/examples/RecontextImageVirtualTryOn.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Usage:
+ *
+ * 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java -Dexec.mainClass="com.google.genai.examples.RecontextImageVirtualTryOn"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.Client;
+import com.google.genai.types.Image;
+import com.google.genai.types.ProductImage;
+import com.google.genai.types.RecontextImageConfig;
+import com.google.genai.types.RecontextImageResponse;
+import com.google.genai.types.RecontextImageSource;
+import java.util.ArrayList;
+
+/** An example of using the Unified Gen AI Java SDK to recontextualize an image (virtual try-on). */
+public final class RecontextImageVirtualTryOn {
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.VIRTUAL_TRY_ON_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
+ // get a `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ } else {
+ System.out.println("Gemini Developer API is not supported for this example.");
+ System.exit(0);
+ }
+
+ Image productImagePants =
+ Image.builder().gcsUri("gs://genai-sdk-tests/inputs/images/pants.jpg").build();
+
+ Image personImage =
+ Image.builder().gcsUri("gs://genai-sdk-tests/inputs/images/man.jpg").build();
+
+ RecontextImageConfig recontextImageConfig =
+ RecontextImageConfig.builder().numberOfImages(1).outputMimeType("image/jpeg").build();
+
+ ArrayList export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -38,6 +38,7 @@
* mvn clean compile
*
* mvn exec:java -Dexec.mainClass="com.google.genai.examples.RequestLevelHttpOptions"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -46,10 +47,18 @@
import com.google.genai.types.GenerateContentConfig;
import com.google.genai.types.GenerateContentResponse;
import com.google.genai.types.HttpOptions;
+import com.google.genai.types.HttpRetryOptions;
/** An example of setting http options at request level. */
public final class RequestLevelHttpOptions {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -66,14 +75,17 @@ public static void main(String[] args) {
System.out.println("Using Gemini Developer API");
}
- // Set a customized header per request config.
+ // Set a customized header and retry options per request config.
GenerateContentConfig config =
GenerateContentConfig.builder()
- .httpOptions(HttpOptions.builder().headers(ImmutableMap.of("my-header", "my-value")))
+ .httpOptions(
+ HttpOptions.builder()
+ .headers(ImmutableMap.of("my-header", "my-value"))
+ .retryOptions(HttpRetryOptions.builder().attempts(3).httpStatusCodes(408, 429)))
.build();
GenerateContentResponse response =
- client.models.generateContent("gemini-2.0-flash-001", "Tell me the history of LLM", config);
+ client.models.generateContent(modelId, "Tell me the history of LLM", config);
System.out.println("Response: " + response.text());
}
diff --git a/examples/src/main/java/com/google/genai/examples/SegmentImage.java b/examples/src/main/java/com/google/genai/examples/SegmentImage.java
new file mode 100644
index 00000000000..9adcf6f93a3
--- /dev/null
+++ b/examples/src/main/java/com/google/genai/examples/SegmentImage.java
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Usage:
+ *
+ * 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java -Dexec.mainClass="com.google.genai.examples.SegmentImage"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.Client;
+import com.google.genai.types.Image;
+import com.google.genai.types.SegmentImageConfig;
+import com.google.genai.types.SegmentImageResponse;
+import com.google.genai.types.SegmentImageSource;
+import com.google.genai.types.SegmentMode;
+
+/** An example of using the Unified Gen AI Java SDK to segment an image. */
+public final class SegmentImage {
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.SEGMENT_IMAGE_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
+ // get a `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ } else {
+ System.out.println("Gemini Developer API is not supported for this example.");
+ System.exit(0);
+ }
+
+ // Base image created using generateImages with prompt:
+ // "A square, circle, and triangle with a white background"
+ Image image = Image.fromFile("./resources/shapes.jpg");
+
+ // Control reference.
+ SegmentImageConfig segmentImageConfig =
+ SegmentImageConfig.builder().mode(SegmentMode.Known.FOREGROUND).build();
+
+ SegmentImageResponse segmentImageResponse =
+ client.models.segmentImage(
+ modelId, SegmentImageSource.builder().image(image).build(), segmentImageConfig);
+
+ Image maskImage = segmentImageResponse.generatedMasks().get().get(0).mask().get();
+ // Do something with maskImage.
+ }
+
+ private SegmentImage() {}
+}
diff --git a/examples/src/main/java/com/google/genai/examples/SegmentImageAsync.java b/examples/src/main/java/com/google/genai/examples/SegmentImageAsync.java
new file mode 100644
index 00000000000..32ffead7e76
--- /dev/null
+++ b/examples/src/main/java/com/google/genai/examples/SegmentImageAsync.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Usage:
+ *
+ * 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java -Dexec.mainClass="com.google.genai.examples.SegmentImageAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.Client;
+import com.google.genai.types.Image;
+import com.google.genai.types.SegmentImageConfig;
+import com.google.genai.types.SegmentImageResponse;
+import com.google.genai.types.SegmentImageSource;
+import com.google.genai.types.SegmentMode;
+import java.util.concurrent.CompletableFuture;
+
+/** An example of using the Unified Gen AI Java SDK to segment an image asynchronously. */
+public final class SegmentImageAsync {
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.SEGMENT_IMAGE_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
+ // get a `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ } else {
+ System.out.println("Gemini Developer API is not supported for this example.");
+ System.exit(0);
+ }
+
+ // Base image created using generateImages with prompt:
+ // "A square, circle, and triangle with a white background"
+ Image image = Image.fromFile("./resources/shapes.jpg");
+
+ // Control reference.
+ SegmentImageConfig segmentImageConfig =
+ SegmentImageConfig.builder().mode(SegmentMode.Known.FOREGROUND).build();
+
+ CompletableFuture 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java -Dexec.mainClass="com.google.genai.examples.TuningJobs"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.Client;
+import com.google.genai.types.ListTuningJobsConfig;
+import com.google.genai.types.TuningDataset;
+import com.google.genai.types.TuningJob;
+
+/** An example of using the Unified Gen AI Java SDK to do operations on tuning jobs. */
+public final class TuningJobs {
+
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
+ // get a `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ // Create a tuning job.
+ TuningDataset tuningDataset =
+ TuningDataset.builder()
+ .gcsUri(
+ "gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl")
+ .build();
+ TuningJob tuningJob1 = client.tunings.tune(modelId, tuningDataset, null);
+ System.out.println("Created tuning job: " + tuningJob1);
+ // Get the tuning job by name.
+ TuningJob tuningJob2 = client.tunings.get(tuningJob1.name().get(), null);
+ System.out.println("Get tuning job: " + tuningJob2);
+
+ // Wait for the tuned model to be available.
+ String tunedModel = "";
+ while (tunedModel.isEmpty()) {
+ System.out.println("Waiting for tuned model to be available");
+ try {
+ Thread.sleep(10000); // Sleep for 10 seconds.
+ } catch (InterruptedException e) {
+ System.out.println("Thread was interrupted while sleeping.");
+ Thread.currentThread().interrupt();
+ }
+ // Get the tuning job.
+ TuningJob fetchedTuningJob = client.tunings.get(tuningJob1.name().get(), null);
+ if (fetchedTuningJob.tunedModel().isPresent()
+ && fetchedTuningJob.tunedModel().get().model().isPresent()) {
+ tunedModel = fetchedTuningJob.tunedModel().get().model().get();
+ }
+ }
+ System.out.println("Tuned model: " + tunedModel);
+ System.out.println();
+ } else {
+ System.out.println("Using Gemini Developer API");
+ }
+
+ // List tuning jobs.
+ System.out.println("List tuning jobs resource names: ");
+ for (TuningJob t :
+ client.tunings.list(ListTuningJobsConfig.builder().pageSize(5).build()).page()) {
+ System.out.println(t.name().get());
+ System.out.println(t.state().get());
+ }
+ }
+
+ private TuningJobs() {}
+}
diff --git a/examples/src/main/java/com/google/genai/examples/TuningJobsAsync.java b/examples/src/main/java/com/google/genai/examples/TuningJobsAsync.java
new file mode 100644
index 00000000000..442a062ba2a
--- /dev/null
+++ b/examples/src/main/java/com/google/genai/examples/TuningJobsAsync.java
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2025 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Usage:
+ *
+ * 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java -Dexec.mainClass="com.google.genai.examples.TuningJobsAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.genai.AsyncPager;
+import com.google.genai.Client;
+import com.google.genai.types.ListTuningJobsConfig;
+import com.google.genai.types.TuningDataset;
+import com.google.genai.types.TuningJob;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+/** An example of using the Unified Gen AI Java SDK to do async operations on tuning jobs. */
+public final class TuningJobsAsync {
+
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
+ // get a `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+ Client client = new Client();
+ ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
+
+ try {
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ // Create a tuning job.
+ TuningDataset tuningDataset =
+ TuningDataset.builder()
+ .gcsUri(
+ "gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl")
+ .build();
+ CompletableFuture 1a. If you are using Vertex AI, setup ADC to get credentials:
+ * https://cloud.google.com/docs/authentication/provide-credentials-adc#google-idp
+ *
+ * Then set Project, Location, and USE_VERTEXAI flag as environment variables:
+ *
+ * export GOOGLE_CLOUD_PROJECT=YOUR_PROJECT
+ *
+ * export GOOGLE_CLOUD_LOCATION=YOUR_LOCATION
+ *
+ * export GOOGLE_GENAI_USE_VERTEXAI=true
+ *
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
+ * list of available API keys here: https://aistudio.google.com/app/apikey
+ *
+ * export GOOGLE_API_KEY=YOUR_API_KEY
+ *
+ * 2. Compile the java package and run the sample code.
+ *
+ * mvn clean compile
+ *
+ * mvn exec:java -Dexec.mainClass="com.google.genai.examples.TuningJobsWithEvaluationConfig"
+ * -Dexec.args="YOUR_MODEL_ID"
+ */
+package com.google.genai.examples;
+
+import com.google.common.collect.ImmutableList;
+import com.google.genai.Client;
+import com.google.genai.types.AutoraterConfig;
+import com.google.genai.types.CreateTuningJobConfig;
+import com.google.genai.types.EvaluationConfig;
+import com.google.genai.types.GcsDestination;
+import com.google.genai.types.ListTuningJobsConfig;
+import com.google.genai.types.UnifiedMetric;
+import com.google.genai.types.BleuSpec;
+import com.google.genai.types.OutputConfig;
+import com.google.genai.types.TuningDataset;
+import com.google.genai.types.TuningJob;
+import com.google.genai.types.TuningValidationDataset;
+import com.google.genai.types.HttpOptions;
+
+/**
+ * An example of using the Unified Gen AI Java SDK to do operations on tuning jobs with an
+ * evaluation config.
+ */
+public final class TuningJobsWithEvaluationConfig {
+
+ public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.GEMINI_MODEL_NAME;
+ }
+
+ // Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
+ // key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
+ // environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
+ // `GOOGLE_GENAI_USE_VERTEXAI` to "true".
+ //
+ // Note: Some services are only available in a specific API backend (Gemini or Vertex), you will
+ // get a `UnsupportedOperationException` if you try to use a service that is not available in
+ // the backend you are using.
+
+ // Tuning is currently only supported in v1beta1.
+ HttpOptions httpOptions = HttpOptions.builder().apiVersion("v1beta1").build();
+ Client client = Client.builder().httpOptions(httpOptions).build();
+
+ if (client.vertexAI()) {
+ System.out.println("Using Vertex AI");
+ // Create a tuning job.
+ TuningDataset tuningDataset =
+ TuningDataset.builder()
+ .gcsUri(
+ "gs://cloud-samples-data/ai-platform/generative_ai/gemini-1_5/text/sft_train_data.jsonl")
+ .build();
+
+ UnifiedMetric bleu =
+ UnifiedMetric.builder()
+ .bleuSpec(BleuSpec.builder().useEffectiveOrder(true).build())
+ .build();
+
+ ImmutableList export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -36,6 +36,7 @@
* 2. Compile the java package and run the sample code.
*
* mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.UpscaleImage"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -47,6 +48,13 @@
/** An example of using the Unified Gen AI Java SDK to upscale an image. */
public final class UpscaleImage {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.IMAGEN_CAPABILITY_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -60,7 +68,8 @@ public static void main(String[] args) {
if (client.vertexAI()) {
System.out.println("Using Vertex AI");
} else {
- System.out.println("Using Gemini Developer API");
+ System.out.println("Gemini Developer API is not supported for this example.");
+ System.exit(0);
}
// Base image created using generateImages with prompt:
@@ -69,10 +78,14 @@ public static void main(String[] args) {
UpscaleImageResponse upscaleImageResponse =
client.models.upscaleImage(
- "imagen-3.0-generate-001",
+ modelId,
image,
"x2",
- UpscaleImageConfig.builder().outputMimeType("image/jpeg").build());
+ UpscaleImageConfig.builder()
+ .outputMimeType("image/jpeg")
+ .enhanceInputImage(true)
+ .imagePreservationFactor(0.6f)
+ .build());
Image upscaledImage = upscaleImageResponse.generatedImages().get().get(0).image().get();
// Do something with upscaledImage.
diff --git a/examples/src/main/java/com/google/genai/examples/UpscaleImageAsync.java b/examples/src/main/java/com/google/genai/examples/UpscaleImageAsync.java
index b71f4d69ab3..df3e099c881 100644
--- a/examples/src/main/java/com/google/genai/examples/UpscaleImageAsync.java
+++ b/examples/src/main/java/com/google/genai/examples/UpscaleImageAsync.java
@@ -28,7 +28,7 @@
*
* export GOOGLE_GENAI_USE_VERTEXAI=true
*
- * 1b. If you are using Gemini Developer AI, set an API key environment variable. You can find a
+ * 1b. If you are using Gemini Developer API, set an API key environment variable. You can find a
* list of available API keys here: https://aistudio.google.com/app/apikey
*
* export GOOGLE_API_KEY=YOUR_API_KEY
@@ -36,6 +36,7 @@
* 2. Compile the java package and run the sample code.
*
* mvn clean compile exec:java -Dexec.mainClass="com.google.genai.examples.UpscaleImageAsync"
+ * -Dexec.args="YOUR_MODEL_ID"
*/
package com.google.genai.examples;
@@ -48,6 +49,13 @@
/** An example of using the Unified Gen AI Java SDK to upscale an image asynchronously. */
public final class UpscaleImageAsync {
public static void main(String[] args) {
+ final String modelId;
+ if (args.length != 0) {
+ modelId = args[0];
+ } else {
+ modelId = Constants.IMAGEN_CAPABILITY_MODEL_NAME;
+ }
+
// Instantiate the client. The client by default uses the Gemini Developer API. It gets the API
// key from the environment variable `GOOGLE_API_KEY`. Vertex AI API can be used by setting the
// environment variables `GOOGLE_CLOUD_LOCATION` and `GOOGLE_CLOUD_PROJECT`, as well as setting
@@ -61,7 +69,8 @@ public static void main(String[] args) {
if (client.vertexAI()) {
System.out.println("Using Vertex AI");
} else {
- System.out.println("Using Gemini Developer API");
+ System.out.println("Gemini Developer API is not supported for this example.");
+ System.exit(0);
}
// Base image created using generateImages with prompt:
@@ -70,7 +79,7 @@ public static void main(String[] args) {
CompletableFuture> groundingChunksOptional =
- groundingMetadata.groundingChunks();
-
- if (groundingChunksOptional.isPresent()) {
- List