Skip to content

Commit 871fdee

Browse files
authored
small code improvement
1 parent ccf9845 commit 871fdee

1 file changed

Lines changed: 5 additions & 5 deletions

File tree

tutorials/ai-core-orchestration-consumption/ai-core-orchestration-consumption.md

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -1083,18 +1083,18 @@ This step outlines the process of generating responses for a set of queries usin
10831083
var client = new OrchestrationClient(new AiCoreService()
10841084
.getInferenceDestination(RESOURCE_GROUP).forScenario("orchestration"));
10851085

1086+
// Create orchestration module configuration
1087+
var moduleConfig = new OrchestrationModuleConfig();
1088+
10861089
// A list to store all responses from the different models
10871090
var responses = new ArrayList<Map>();
10881091

10891092
// Iterate through the list of models
10901093
for (var model: models) {
10911094
System.out.println("\n=== Responses for model: %s ===\n".formatted(model.getName()));
10921095

1093-
// Create orchestration module configuration for current model
1094-
var moduleConfig = new OrchestrationModuleConfig().withLlmConfig(model);
1095-
1096-
// Prompt model with orchestration module configuration
1097-
var response = client.chatCompletion(prompt, moduleConfig);
1096+
// Prompt LLM with specific LLM config for model
1097+
var response = client.chatCompletion(prompt, moduleConfig.withLlmConfig(model));
10981098

10991099
// Add response to list of all model responses
11001100
responses.add(Map.of("model", model.getName(), "response", response.getContent()));

0 commit comments

Comments (0)