From a11686d0e6f5d74ca035b22db1c0d66c30eeaede Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Sun, 3 May 2020 17:34:57 +0530 Subject: [PATCH 01/40] Secondary Storage Management --- .../main/java/com/cloud/event/EventTypes.java | 1 + .../com/cloud/storage/ImageStoreService.java | 29 + .../com/cloud/storage/StorageService.java | 2 + .../apache/cloudstack/api/ApiConstants.java | 5 + .../org/apache/cloudstack/api/BaseCmd.java | 51 +- .../admin/storage/ListImageStoresCmd.java | 10 +- .../MigrateSecondaryStorageDataCmd.java | 138 ++++ .../admin/storage/UpdateImageStoreCmd.java | 95 +++ .../api/response/ImageStoreResponse.java | 14 +- .../api/response/MigrationResponse.java | 56 ++ .../api/response/TemplateResponse.java | 9 + .../service/StorageOrchestrationService.java | 30 + .../api/storage/DataStoreManager.java | 8 + .../api/storage/EndPointSelector.java | 2 + .../ObjectInDataStoreStateMachine.java | 5 + .../api/storage/SecondaryStorageService.java | 43 ++ .../subsystem/api/storage/SnapshotInfo.java | 4 + .../orchestration/StorageOrchestrator.java | 671 ++++++++++++++++++ .../orchestration/VolumeOrchestrator.java | 1 - ...ring-engine-orchestration-core-context.xml | 3 + .../main/java/com/cloud/host/dao/HostDao.java | 2 + .../java/com/cloud/host/dao/HostDaoImpl.java | 7 + .../cloud/secstorage/CommandExecLogDao.java | 1 + .../secstorage/CommandExecLogDaoImpl.java | 19 +- .../storage/datastore/db/ImageStoreDao.java | 2 + .../datastore/db/ImageStoreDaoImpl.java | 16 + .../storage/datastore/db/ImageStoreVO.java | 11 + .../META-INF/db/schema-41310to41400.sql | 27 + .../storage/motion/DataMotionServiceImpl.java | 13 +- .../storage/image/StorageServiceImpl.java | 165 +++++ .../image/TemplateDataFactoryImpl.java | 11 +- .../storage/image/TemplateServiceImpl.java | 3 +- .../ImageStoreProviderManagerImpl.java | 35 + .../storage/image/store/TemplateObject.java | 4 +- ...ring-engine-storage-image-core-context.xml | 6 + .../storage/snapshot/SnapshotObject.java | 23 +- 
.../datastore/DataStoreManagerImpl.java | 32 +- .../ObjectInDataStoreManagerImpl.java | 8 + .../endpoint/DefaultEndPointSelector.java | 23 +- .../image/BaseImageStoreDriverImpl.java | 116 ++- .../datastore/ImageStoreProviderManager.java | 9 + .../image/db/TemplateDataStoreDaoImpl.java | 12 +- .../storage/volume/VolumeObject.java | 8 +- .../storage/volume/VolumeServiceImpl.java | 1 - .../framework/jobs/dao/AsyncJobDaoImpl.java | 4 +- server/pom.xml | 5 + .../com/cloud/api/query/QueryManagerImpl.java | 10 +- .../cloud/api/query/ViewResponseHelper.java | 8 +- .../api/query/dao/ImageStoreJoinDaoImpl.java | 1 + .../api/query/dao/TemplateJoinDaoImpl.java | 25 +- .../cloud/api/query/vo/ImageStoreJoinVO.java | 7 + .../java/com/cloud/configuration/Config.java | 22 +- .../cloud/server/ManagementServerImpl.java | 4 + .../cloud/storage/ImageStoreDetailsUtil.java | 1 - .../cloud/storage/ImageStoreServiceImpl.java | 123 ++++ .../com/cloud/storage/StorageManagerImpl.java | 14 +- .../storage/download/DownloadListener.java | 2 +- .../secondary/SecondaryStorageVmManager.java | 4 + .../template/HypervisorTemplateAdapter.java | 42 +- .../cloud/template/TemplateManagerImpl.java | 2 +- .../diagnostics/DiagnosticsServiceImpl.java | 2 +- .../spring-server-core-managers-context.xml | 2 + .../PremiumSecondaryStorageManagerImpl.java | 70 +- .../SecondaryStorageManagerImpl.java | 11 +- .../resource/NfsSecondaryStorageResource.java | 109 ++- tools/apidoc/gen_toc.py | 1 + 66 files changed, 2074 insertions(+), 126 deletions(-) create mode 100644 api/src/main/java/com/cloud/storage/ImageStoreService.java create mode 100644 api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java create mode 100644 api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java create mode 100644 api/src/main/java/org/apache/cloudstack/api/response/MigrationResponse.java create mode 100644 
engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java create mode 100644 engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SecondaryStorageService.java create mode 100644 engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java create mode 100644 engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java create mode 100644 server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java diff --git a/api/src/main/java/com/cloud/event/EventTypes.java b/api/src/main/java/com/cloud/event/EventTypes.java index 30b6ac0b0a17..69044738f7fb 100644 --- a/api/src/main/java/com/cloud/event/EventTypes.java +++ b/api/src/main/java/com/cloud/event/EventTypes.java @@ -229,6 +229,7 @@ public class EventTypes { public static final String EVENT_TEMPLATE_EXTRACT = "TEMPLATE.EXTRACT"; public static final String EVENT_TEMPLATE_UPLOAD = "TEMPLATE.UPLOAD"; public static final String EVENT_TEMPLATE_CLEANUP = "TEMPLATE.CLEANUP"; + public static final String EVENT_TEMPLATE_MIGRATE = "TEMPLATE.MIGRATE"; // Volume Events public static final String EVENT_VOLUME_CREATE = "VOLUME.CREATE"; diff --git a/api/src/main/java/com/cloud/storage/ImageStoreService.java b/api/src/main/java/com/cloud/storage/ImageStoreService.java new file mode 100644 index 000000000000..049a9f807a97 --- /dev/null +++ b/api/src/main/java/com/cloud/storage/ImageStoreService.java @@ -0,0 +1,29 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package com.cloud.storage; + +import org.apache.cloudstack.api.command.admin.storage.MigrateSecondaryStorageDataCmd; +import org.apache.cloudstack.api.response.MigrationResponse; + +public interface ImageStoreService { + + public static enum MigrationPolicy { + Balance, Complete + } + MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd); +} diff --git a/api/src/main/java/com/cloud/storage/StorageService.java b/api/src/main/java/com/cloud/storage/StorageService.java index aebbbcd4bd04..207fc8f0cd7a 100644 --- a/api/src/main/java/com/cloud/storage/StorageService.java +++ b/api/src/main/java/com/cloud/storage/StorageService.java @@ -102,4 +102,6 @@ public interface StorageService { */ ImageStore migrateToObjectStore(String name, String url, String providerName, Map details) throws DiscoveryException; + ImageStore updateImageStoreStatus(Long id, Boolean readonly); + } diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 2a201dba196a..7d151d0ceb76 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -17,6 +17,7 @@ package org.apache.cloudstack.api; public class ApiConstants { + public static final String TEMP = "temp"; public static final String ACCOUNT = "account"; public static final String ACCOUNTS = "accounts"; public static final String ACCOUNT_TYPE = "accounttype"; @@ -113,6 +114,7 @@ public class ApiConstants { public static final String 
DISK_IO_WRITE = "diskiowrite"; public static final String DISK_IO_PSTOTAL = "diskiopstotal"; public static final String DISK_SIZE = "disksize"; + public static final String DOWNLOAD_DETAILS = "downloaddetails"; public static final String UTILIZATION = "utilization"; public static final String DRIVER = "driver"; public static final String ROOT_DISK_SIZE = "rootdisksize"; @@ -234,6 +236,7 @@ public class ApiConstants { public static final String MAX_MEMORY = "maxmemory"; public static final String MIN_CPU_NUMBER = "mincpunumber"; public static final String MIN_MEMORY = "minmemory"; + public static final String MIGRATION_TYPE = "migrationtype"; public static final String MEMORY = "memory"; public static final String MODE = "mode"; public static final String KEEPALIVE_ENABLED = "keepaliveenabled"; @@ -351,6 +354,7 @@ public class ApiConstants { public static final String TARGET_IQN = "targetiqn"; public static final String TEMPLATE_FILTER = "templatefilter"; public static final String TEMPLATE_ID = "templateid"; + public static final String TEMPLATE_IDS = "templateids"; public static final String ISO_ID = "isoid"; public static final String TIMEOUT = "timeout"; public static final String TIMEZONE = "timezone"; @@ -777,6 +781,7 @@ public class ApiConstants { public static final String EXITCODE = "exitcode"; public static final String TARGET_ID = "targetid"; public static final String FILES = "files"; + public static final String FROM = "from"; public static final String VOLUME_IDS = "volumeids"; public static final String ROUTER_ID = "routerid"; diff --git a/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java b/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java index 37dbeaab841a..ad173a79ce8c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java @@ -17,6 +17,31 @@ package org.apache.cloudstack.api; +import java.lang.reflect.Field; +import java.text.DateFormat; +import 
java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Date; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.regex.Pattern; + +import javax.inject.Inject; + +import org.apache.cloudstack.acl.RoleService; +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.affinity.AffinityGroupService; +import org.apache.cloudstack.alert.AlertService; +import org.apache.cloudstack.annotation.AnnotationService; +import org.apache.cloudstack.context.CallContext; +import org.apache.cloudstack.network.element.InternalLoadBalancerElementService; +import org.apache.cloudstack.network.lb.ApplicationLoadBalancerService; +import org.apache.cloudstack.network.lb.InternalLoadBalancerVMService; +import org.apache.cloudstack.query.QueryService; +import org.apache.cloudstack.usage.UsageService; +import org.apache.log4j.Logger; + import com.cloud.configuration.ConfigurationService; import com.cloud.exception.ConcurrentOperationException; import com.cloud.exception.InsufficientCapacityException; @@ -44,6 +69,7 @@ import com.cloud.server.ResourceMetaDataService; import com.cloud.server.TaggedResourceService; import com.cloud.storage.DataStoreProviderApiService; +import com.cloud.storage.ImageStoreService; import com.cloud.storage.StorageService; import com.cloud.storage.VolumeApiService; import com.cloud.storage.snapshot.SnapshotApiService; @@ -58,29 +84,6 @@ import com.cloud.utils.db.UUIDManager; import com.cloud.vm.UserVmService; import com.cloud.vm.snapshot.VMSnapshotService; -import org.apache.cloudstack.acl.RoleService; -import org.apache.cloudstack.acl.RoleType; -import org.apache.cloudstack.affinity.AffinityGroupService; -import org.apache.cloudstack.alert.AlertService; -import org.apache.cloudstack.annotation.AnnotationService; -import org.apache.cloudstack.context.CallContext; -import org.apache.cloudstack.network.element.InternalLoadBalancerElementService; -import 
org.apache.cloudstack.network.lb.ApplicationLoadBalancerService; -import org.apache.cloudstack.network.lb.InternalLoadBalancerVMService; -import org.apache.cloudstack.query.QueryService; -import org.apache.cloudstack.usage.UsageService; -import org.apache.log4j.Logger; - -import javax.inject.Inject; -import java.lang.reflect.Field; -import java.text.DateFormat; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Date; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.regex.Pattern; public abstract class BaseCmd { private static final Logger s_logger = Logger.getLogger(BaseCmd.class.getName()); @@ -126,6 +129,8 @@ public static enum CommandType { @Inject public TemplateApiService _templateService; @Inject + public ImageStoreService _imageStoreService; + @Inject public SecurityGroupService _securityGroupService; @Inject public SnapshotApiService _snapshotService; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListImageStoresCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListImageStoresCmd.java index 8c37c78c7632..6d11983c1420 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListImageStoresCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListImageStoresCmd.java @@ -16,8 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.storage; -import org.apache.log4j.Logger; - import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseListCmd; @@ -25,6 +23,7 @@ import org.apache.cloudstack.api.response.ImageStoreResponse; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.ZoneResponse; +import org.apache.log4j.Logger; @APICommand(name = "listImageStores", description = "Lists image stores.", responseObject = ImageStoreResponse.class, since = "4.2.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) @@ -52,6 +51,9 @@ public class ListImageStoresCmd extends BaseListCmd { @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = ImageStoreResponse.class, description = "the ID of the storage pool") private Long id; + @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, entityType = ImageStoreResponse.class, description = "read-only status of the image store") + private Boolean readonly; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -80,6 +82,10 @@ public void setProvider(String provider) { this.provider = provider; } + public Boolean getReadonly() { + return readonly; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java new file mode 100644 index 000000000000..7e97903bea54 --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java @@ -0,0 +1,138 @@ +// Licensed to the Apache 
Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.storage; + +import java.util.List; + +import org.apache.cloudstack.acl.RoleType; +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.BaseAsyncCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ImageStoreResponse; +import org.apache.cloudstack.api.response.MigrationResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; + +import com.cloud.event.EventTypes; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.utils.StringUtils; + +@APICommand(name = MigrateSecondaryStorageDataCmd.APINAME, + description = "migrates templates from one secondary storage to destination image store", + responseObject = MigrationResponse.class, + requestHasSensitiveInfo = false, + responseHasSensitiveInfo = false, 
+ since = "4.14.0", + authorized = {RoleType.Admin}) +public class MigrateSecondaryStorageDataCmd extends BaseAsyncCmd { + + public static final Logger s_logger = Logger.getLogger(MigrateSecondaryStorageDataCmd.class.getName()); + + public static final String APINAME = "migrateSecondaryStorageData"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + + @Parameter(name = ApiConstants.FROM, + type = CommandType.UUID, + entityType = ImageStoreResponse.class, + description = "id of the image store from where the data is to be migrated") + private Long id; + + @Parameter(name = ApiConstants.MIGRATE_TO, + type = CommandType.LIST, + collectionType = CommandType.UUID, + entityType = ImageStoreResponse.class, + description = "id of the destination secondary storage pool to which the templates are to be migrated") + private List migrateTo; + + @Parameter(name = ApiConstants.MIGRATION_TYPE, + type = CommandType.STRING, + description = "balance: if you want data to be distributed evenly among the destination stores, " + + "complete: if you want to migrate the entire data from source image store to the destination store(s)") + private String migrationType; + + @Parameter(name = ApiConstants.TEMP, + type = CommandType.LONG, + description = "internal parameter; " + + "reserved for testing purposes") + private Long temp; + + + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + + public Long getId() { + return id; + } + + + + public List getMigrateTo() { + return migrateTo; + } + + public String getMigrationType() { + return migrationType; + } + + @Override + public String getEventType() { + return
EventTypes.EVENT_TEMPLATE_MIGRATE; + } + + @Override + public String getEventDescription() { + return "Attempting to migrate templates " + "from : " + this.getId() + " to: " + StringUtils.join(getMigrateTo(), ","); + } + + public Long getTemp() { + return temp; + } + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + String message = "Migration still in progress"; + MigrationResponse response = _imageStoreService.migrateData(this); + if (response.getMessage() == null) { + response.setMessage(message); + } + response.setObjectName("imagestore"); + this.setResponseObject(response); + } + + @Override + public String getCommandName() { + return APINAME.toLowerCase() + BaseAsyncCmd.RESPONSE_SUFFIX; + } + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccountId(); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java new file mode 100644 index 000000000000..b3e42113287a --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java @@ -0,0 +1,95 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.command.admin.storage; + +import org.apache.cloudstack.api.APICommand; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.api.ApiErrorCode; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; +import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ImageStoreResponse; +import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; + +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.storage.ImageStore; + +@APICommand(name = UpdateImageStoreCmd.APINAME, description = "Updates image store read-only status", responseObject = ImageStoreResponse.class, entityType = {ImageStore.class}, + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) +public class UpdateImageStoreCmd extends BaseCmd { + private static final Logger LOG = Logger.getLogger(UpdateImageStoreCmd.class.getName()); + public static final String APINAME = "updateImageStore"; + + ///////////////////////////////////////////////////// + //////////////// API parameters ///////////////////// + ///////////////////////////////////////////////////// + @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = ImageStoreResponse.class, required = true, description 
= "Image Store UUID") + private Long id; + + @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = true, description = "If set to true, it designates the corresponding image store as read-only") + private Boolean readonly; + + ///////////////////////////////////////////////////// + /////////////////// Accessors /////////////////////// + ///////////////////////////////////////////////////// + + public Long getId() { + return id; + } + + public Boolean getReadonly() { + return readonly; + } + + ///////////////////////////////////////////////////// + /////////////// API Implementation/////////////////// + ///////////////////////////////////////////////////// + + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + ImageStore result = _storageService.updateImageStoreStatus(getId(), getReadonly()); + ImageStoreResponse storeResponse = null; + if (result != null) { + LOG.debug("Updated read-only status of image store with id: " + getId()); + storeResponse = _responseGenerator.createImageStoreResponse(result); + LOG.debug("Image store response: id = " + storeResponse.getId() + " name: " + storeResponse.getName() + " url: " + storeResponse.getUrl()); + storeResponse.setResponseName(getCommandName()+"response"); + storeResponse.setObjectName("imagestore"); + setResponseObject(storeResponse); + } else { + throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update Image store status"); + } + + } + + @Override + public String getCommandName() { + return APINAME; + } + + + @Override + public long getEntityOwnerId() { + return CallContext.current().getCallingAccountId(); + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java index aaef652073e8..59e6bc37d143 100644 ---
a/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/ImageStoreResponse.java @@ -16,8 +16,6 @@ // under the License. package org.apache.cloudstack.api.response; -import com.google.gson.annotations.SerializedName; - import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseResponse; import org.apache.cloudstack.api.EntityReference; @@ -25,6 +23,7 @@ import com.cloud.serializer.Param; import com.cloud.storage.ImageStore; import com.cloud.storage.ScopeType; +import com.google.gson.annotations.SerializedName; @EntityReference(value = ImageStore.class) public class ImageStoreResponse extends BaseResponse { @@ -60,6 +59,10 @@ public class ImageStoreResponse extends BaseResponse { @Param(description = "the scope of the image store") private ScopeType scope; + @SerializedName("readonly") + @Param(description = "defines if store is read-only") + private Boolean readonly; + public ImageStoreResponse() { } @@ -132,4 +135,11 @@ public void setProtocol(String protocol) { this.protocol = protocol; } + public Boolean getReadonly() { + return readonly; + } + + public void setReadonly(Boolean readonly) { + this.readonly = readonly; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/MigrationResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/MigrationResponse.java new file mode 100644 index 000000000000..0c342ab18e5f --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/response/MigrationResponse.java @@ -0,0 +1,56 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.api.response; + +import org.apache.cloudstack.api.BaseResponse; + +public class MigrationResponse extends BaseResponse { + private String message; + private String migrationType; + private boolean success; + + public MigrationResponse(String message, String migrationType, boolean success) { + this.message = message; + this.migrationType = migrationType; + this.success = success; + } + + public String getMessage() { + return message; + } + + public void setMessage(String message) { + this.message = message; + } + + public String getMigrationType() { + return migrationType; + } + + public void setMigrationType(String migrationType) { + this.migrationType = migrationType; + } + + public boolean isSuccess() { + return success; + } + + public void setSuccess(boolean success) { + this.success = success; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java index 81fc2f37b0d6..607098b5f3ed 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java @@ -18,6 +18,7 @@ import java.util.Date; import java.util.LinkedHashSet; +import java.util.List; import java.util.Map; import java.util.Set; @@ -173,6 +174,10 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements @Param(description = "additional key/value details tied with template") private Map details; + 
@SerializedName(ApiConstants.DOWNLOAD_DETAILS) + @Param(description = "Lists the download progress of a template across all secondary storages") + private List downloadDetails; + @SerializedName(ApiConstants.BITS) @Param(description = "the processor bit size", since = "4.10") private int bits; @@ -255,6 +260,10 @@ public void setPublic(boolean isPublic) { this.isPublic = isPublic; } + public void setDownloadProgress(List downloadDetails) { + this.downloadDetails = downloadDetails; + } + public void setCreated(Date created) { this.created = created; } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java new file mode 100644 index 000000000000..6a45fa8b319d --- /dev/null +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java @@ -0,0 +1,30 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.engine.orchestration.service; + +import java.util.List; + +import org.apache.cloudstack.api.response.MigrationResponse; + +public interface StorageOrchestrationService { + public static enum MigrationPolicy { + Balance, Complete + } + + MigrationResponse migrateData(Long srcDataStoreId, List destDatastores, String migrationPolicy, Long temp); +} diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreManager.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreManager.java index ad5b1622cd22..942294f367ba 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreManager.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreManager.java @@ -33,10 +33,16 @@ public interface DataStoreManager { List getImageStoresByScope(ZoneScope scope); + List getImageStoresByScopeExcludingReadOnly(ZoneScope scope); + DataStore getRandomImageStore(long zoneId); + DataStore getRandomUsableImageStore(long zoneId); + DataStore getImageStoreWithFreeCapacity(long zoneId); + DataStore getImageStoreWithFreeCapacity(List imageStores); + List listImageStoresWithFreeCapacity(long zoneId); List getImageCacheStores(Scope scope); @@ -48,4 +54,6 @@ public interface DataStoreManager { List listImageCacheStores(); boolean isRegionStore(DataStore store); + + List orderImageStoresOnFreeCapacity(List stores); } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPointSelector.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPointSelector.java index 0613a11572f7..ec2725019983 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPointSelector.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/EndPointSelector.java @@ -33,6 +33,8 @@ public interface 
EndPointSelector { List selectAll(DataStore store); + List findAllEndpointsForScope(DataStore store); + EndPoint select(Scope scope, Long storeId); EndPoint select(DataStore store, String downloadUrl); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java index 204cab0bd74c..ae547eb32699 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java @@ -29,6 +29,7 @@ enum State { Ready("Template downloading is accomplished"), Copying("The object is being coping"), Migrating("The object is being migrated"), + Migrated("The object has been migrated"), Destroying("Template is destroying"), Destroyed("Template is destroyed"), Failed("Failed to download template"); @@ -49,8 +50,12 @@ enum Event { DestroyRequested, OperationSuccessed, OperationFailed, + // Added as volume converts migrationrequested to copyrequested - VolumeObject.java + CopyRequested, CopyingRequested, MigrationRequested, + MigrationSucceeded, + MigrationFailed, MigrationCopyRequested, MigrationCopySucceeded, MigrationCopyFailed, diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SecondaryStorageService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SecondaryStorageService.java new file mode 100644 index 000000000000..07828fda5ce7 --- /dev/null +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SecondaryStorageService.java @@ -0,0 +1,43 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. 
See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package org.apache.cloudstack.engine.subsystem.api.storage; + +import java.util.List; +import java.util.Map; + +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.storage.command.CommandResult; + +import com.cloud.utils.Pair; + +public interface SecondaryStorageService { + class DataObjectResult extends CommandResult { + private final DataObject data; + + public DataObjectResult(DataObject data) { + super(); + this.data = data; + } + + public DataObject getData() { + return this.data; + } + + } + AsyncCallFuture migrateData(DataObject srcDataObject, DataStore srcDatastore, DataStore destDatastore, Map, Long>> snapshotChain); +} diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotInfo.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotInfo.java index ef72afc5b777..58a82ac2c740 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotInfo.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/SnapshotInfo.java @@ -16,6 +16,8 @@ // under the License. 
package org.apache.cloudstack.engine.subsystem.api.storage; +import java.util.List; + import com.cloud.storage.Snapshot; import com.cloud.utils.exception.CloudRuntimeException; @@ -26,6 +28,8 @@ public interface SnapshotInfo extends DataObject, Snapshot { SnapshotInfo getChild(); + List getChildren(); + VolumeInfo getBaseVolume(); void addPayload(Object data); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java new file mode 100644 index 000000000000..336353f03db6 --- /dev/null +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -0,0 +1,671 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.engine.orchestration; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Hashtable; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.api.response.MigrationResponse; +import org.apache.cloudstack.engine.orchestration.service.StorageOrchestrationService; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageService; +import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageService.DataObjectResult; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.config.Configurable; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.jobs.AsyncJobManager; +import 
org.apache.cloudstack.storage.datastore.db.ImageStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; +import org.apache.commons.math3.stat.descriptive.moment.Mean; +import org.apache.commons.math3.stat.descriptive.moment.StandardDeviation; +import org.apache.log4j.Logger; + +import com.cloud.configuration.Config; +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.server.StatsCollector; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.ImageStoreService; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.Storage; +import com.cloud.storage.StorageService; +import com.cloud.storage.StorageStats; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.Pair; +import com.cloud.utils.StringUtils; +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.SecondaryStorageVm; +import com.cloud.vm.SecondaryStorageVmVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.SecondaryStorageVmDao; + +public class StorageOrchestrator extends ManagerBase implements StorageOrchestrationService, Configurable { + + private static final Logger s_logger = Logger.getLogger(StorageOrchestrator.class); + @Inject + TemplateDataStoreDao templateDataStoreDao; + @Inject + SnapshotDataStoreDao snapshotDataStoreDao; + @Inject + VolumeDataStoreDao volumeDataStoreDao; + @Inject + VolumeDataFactory volumeFactory; + 
@Inject + VMTemplateDao templateDao; + @Inject + TemplateDataFactory templateFactory; + @Inject + SnapshotDao snapshotDao; + @Inject + SnapshotDataFactory snapshotFactory; + @Inject + DataStoreManager dataStoreManager; + @Inject + ImageStoreDao imageStoreDao; + @Inject + StatsCollector statsCollector; + @Inject + public StorageService storageService; + @Inject + SecondaryStorageVmDao secStorageVmDao; + @Inject + ConfigurationDao configDao; + @Inject + HostDao hostDao; + @Inject + private AsyncJobManager jobMgr; + @Inject + private SecondaryStorageService secStgSrv; + + ConfigKey ImageStoreImbalanceThreshold = new ConfigKey<>("Advanced", Double.class, + "image.store.imbalance.threshold", + "0.1", + "The storage imbalance threshold that is compared with the standard deviation percentage for a storage utilization metric. " + + "The value is a percentage in decimal format.", + true, ConfigKey.Scope.Global); + + Integer numConcurrentCopyTasksPerSSVM = 2; + + private double imageStoreCapacityThreshold = 0.90; + + @Override + public String getConfigComponentName() { + return StorageOrchestrationService.class.getName(); + } + + @Override + public ConfigKey[] getConfigKeys() { + return new ConfigKey[]{ImageStoreImbalanceThreshold}; + } + + static class MigrateBlockingQueue extends ArrayBlockingQueue { + + MigrateBlockingQueue(int size) { + super(size); + } + + public boolean offer(T task) { + try { + this.put(task); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + return true; + } + } + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + numConcurrentCopyTasksPerSSVM = NumbersUtil.parseInt(configDao.getValue(Config.SecStorageCopyCmdMaxSessions.key()), 2); + return true; + } + + @Override + public MigrationResponse migrateData(Long srcDataStoreId, List destDatastores, String migrationPolicy, Long temp) { + List files = new LinkedList<>(); + int successCount = 0; + boolean success = true; + String 
message = null; + + if (migrationPolicy.equals(MigrationPolicy.Complete.toString())) { + if (!filesReady(srcDataStoreId)) { + throw new CloudRuntimeException("Complete migration failed as there are data objects which are not Ready"); + } + } + + DataStore srcDatastore = dataStoreManager.getDataStore(srcDataStoreId, DataStoreRole.Image); + Map, Long>> snapshotChains = new HashMap<>(); + files.addAll(getAllValidTemplates(srcDatastore)); + files.addAll(getAllValidSnapshotChains(srcDatastore, snapshotChains)); + files.addAll(getAllValidVolumes(srcDatastore)); + + Collections.sort(files, new Comparator() { + @Override + public int compare(DataObject o1, DataObject o2) { + Long size1 = o1.getSize(); + Long size2 = o2.getSize(); + if (o1 instanceof SnapshotInfo) { + size1 = snapshotChains.get(o1).second(); + } + if (o2 instanceof SnapshotInfo) { + size2 = snapshotChains.get(o2).second(); + } + //return o2.getSize() > o1.getSize() ? 1 : -1; + return size2 > size1 ? 1 : -1; + } + }); + + s_logger.debug("PEARL - sorted files"); + for (DataObject obj : files) { + s_logger.debug("PEARL - data object: " + obj.getDataStore().getName() + " Size : " + obj.getSize()); + } + + if (files.isEmpty()) { + return new MigrationResponse("No files in Image store "+srcDatastore.getId()+ " to migrate", migrationPolicy, true); + } + + // Create capacity class with free and total space, maybe id of ds too and use that as the value + Map> storageCapacities = new Hashtable<>(); + + for (Long storeId : destDatastores) { + storageCapacities.put(storeId, new Pair<>(null, null)); + } + + storageCapacities.put(srcDataStoreId, new Pair<>(null, null)); + s_logger.debug("PEARL - before all"); + for (Map.Entry> entry : storageCapacities.entrySet()) { + s_logger.debug("PEARL - store id : " + entry.getKey() + " free capacity: " + entry.getValue().first() + " total cap: " + entry.getValue().second()); + } + + // If the migration policy is to completely migrate data from the given source Image Store, then set 
it's state + // to readonly + if (migrationPolicy.equals(ImageStoreService.MigrationPolicy.Complete.toString())) { + s_logger.debug("PEARL - setting source image store "+srcDatastore.getId()+ " to read-only"); + storageService.updateImageStoreStatus(srcDataStoreId, true); + } + + storageCapacities = getStorageCapacities(storageCapacities); + double meanstddev = getStandardDeviation(storageCapacities); + double threshold = ImageStoreImbalanceThreshold.value(); + MigrationResponse response = null; + + // TODO: core = max; & core = no of ssvms * concurrent/ssvm + ThreadPoolExecutor executor = new ThreadPoolExecutor(numConcurrentCopyTasksPerSSVM , numConcurrentCopyTasksPerSSVM, 30, TimeUnit.MINUTES, new MigrateBlockingQueue<>(2)); + // TODO : return if meanstddev < threshold + s_logger.debug("PEARL - mean std deviation = " + meanstddev); + + // TODO: uncomment when testing is completed +// if (meanstddev < threshold) { +// s_logger.debug("PEARL - mean std deviation of the storages is below threshold, no migration required"); +// response = new MigrationResponse("Migration not required as system seems balanced", migrationType, true); +// return response; +// } + + List>> futures = new ArrayList<>(); + + while (true) { + s_logger.debug("PEARL - files size == " + files.size()); + s_logger.debug("PEARL - datastore dest size == " + destDatastores.size()); + s_logger.debug("PEARL - stores to capacity map == "); + for (Map.Entry> entry : storageCapacities.entrySet()) { + s_logger.debug("PEARL - store id : " + entry.getKey() + " free capacity: " + entry.getValue().first() + " total cap: " + entry.getValue().second()); + } + + DataObject chosenFileForMigration = null; + if (files.size() > 0) { + chosenFileForMigration = files.remove(0); + } + + // Choose datastore with maximum free capacity as the destination datastore for migration + storageCapacities = getStorageCapacities(storageCapacities); + List orderedDS = sortDataStores(storageCapacities); + Long destDatastoreId = 
orderedDS.get(0); + + // If there aren't anymore files available for migration or no valid Image stores available for migration + // end the migration process + destDatastoreId = temp; + s_logger.debug("PEARL - chosen file = "+ (chosenFileForMigration != null ? chosenFileForMigration.getId() : "null file")); + s_logger.debug("PEARL - destid "+ destDatastoreId); + s_logger.debug("PEARL - src id = "+ srcDatastore.getId()); + if (chosenFileForMigration == null || destDatastoreId == null || destDatastoreId == srcDatastore.getId()) { + s_logger.debug("PEARL - migration completed "); + if (destDatastoreId == srcDatastore.getId() && !files.isEmpty() ) { + if (migrationPolicy.equals(ImageStoreService.MigrationPolicy.Balance.toString())) { + s_logger.debug("PEARL - src id = dest id"); + message = "Image stores have been balanced"; + success = true; + } else { + message = "Files not completely migrated from "+ srcDatastore.getId() + + " If you want to continue using the Image Store, please change the read-only status using 'update imagestore' command"; + success = false; + } + } else { + message = "Migration completed"; + success = true; + } + break; + } + + if (chosenFileForMigration.getSize() > storageCapacities.get(destDatastoreId).first()) { + s_logger.debug("PEARL - file " + chosenFileForMigration.getId() + " too large to be migrated to " + destDatastoreId); + continue; + } + + // If there is a benefit in migration of the chosen file to the destination store, then proceed with migration + if (shouldMigrate(chosenFileForMigration, srcDatastore.getId(), destDatastoreId, storageCapacities, snapshotChains, migrationPolicy)) { + Long fileSize = getFileSize(chosenFileForMigration, snapshotChains); + s_logger.debug("PEARL - in migrate decision function - yes"); + s_logger.debug("PEARL - current metrics = "); + for (Map.Entry> p : storageCapacities.entrySet()) { + s_logger.debug("PEARL - Datastore : " + p.getKey() + " free capacity: " + p.getValue().first() + " total capacity: 
" + p.getValue().second()); + } + + storageCapacities = assumeMigrate(storageCapacities, srcDatastore.getId(), destDatastoreId, fileSize); + + long activeSsvms = activeSSVMCount(srcDatastore); + long totalJobs = activeSsvms * numConcurrentCopyTasksPerSSVM; + s_logger.debug("PEARL - total jobs = "+ totalJobs); + // Increase thread pool size with increase in number of SSVMs + if ( totalJobs > executor.getCorePoolSize()) { + executor.setMaximumPoolSize((int) (totalJobs)); + executor.setCorePoolSize((int) (totalJobs)); + s_logger.debug("PEARL - max pool size : "+ executor.getMaximumPoolSize()); + s_logger.debug("PEARL - core pool size : "+ executor.getCorePoolSize()); + } + + MigrateDataTask task = new MigrateDataTask(chosenFileForMigration, srcDatastore, dataStoreManager.getDataStore(destDatastoreId, DataStoreRole.Image)); + if (chosenFileForMigration instanceof SnapshotInfo ) { + task.setSnapshotChains(snapshotChains); + } + futures.add((executor.submit(task))); + s_logger.debug("PEARL - migration of file " + chosenFileForMigration.getId() + " is done"); + } else { + s_logger.debug("PEARL - migration completed!"); + if (migrationPolicy.equals(ImageStoreService.MigrationPolicy.Balance.toString())) { + message = "Migration completed and has successfully balanced the data objects among stores: " + StringUtils.join(storageCapacities.keySet(), ","); + } else { + message = "Complete migration failed. Please set the source Image store to read-write mode if you want to continue using it"; + success = false; + } + break; + } + } + + for (Future> future : futures) { + try { + AsyncCallFuture res = future.get(); + if (res.get().isSuccess()) { + successCount++; + } + } catch ( InterruptedException | ExecutionException e) { + throw new CloudRuntimeException("Failed to get result"); + } + } + message += ". 
successful migrations: "+successCount; + return new MigrationResponse(message, migrationPolicy, success); + } + + private Map> getStorageCapacities(Map> storageCapacities) { + Map> capacities = new Hashtable<>(); + for (Long storeId : storageCapacities.keySet()) { + s_logger.debug("PEARL - store ID = " + storeId); + StorageStats stats = statsCollector.getStorageStats(storeId); + if (stats != null) { + if (storageCapacities.get(storeId) == null || storageCapacities.get(storeId).first() == null || storageCapacities.get(storeId).second() == null) { + s_logger.debug("PEARL - free caap : " + (stats.getCapacityBytes() - stats.getByteUsed())); + s_logger.debug("PEARL - total cap : " + stats.getCapacityBytes()); + capacities.put(storeId, new Pair<>(stats.getCapacityBytes() - stats.getByteUsed(), stats.getCapacityBytes())); + } else { + long totalCapacity = stats.getCapacityBytes(); + Long freeCapacity = totalCapacity - stats.getByteUsed(); + s_logger.debug("PEARL - pair value: " + storageCapacities.get(storeId)); + s_logger.debug("PEARL - free capacity = " + freeCapacity); + if (freeCapacity >= storageCapacities.get(storeId).first()) { + capacities.put(storeId, storageCapacities.get(storeId)); + } else { + capacities.put(storeId, new Pair<>(freeCapacity, totalCapacity)); + } + } + } else { + throw new CloudRuntimeException("Stats Collector hasn't yet collected metrics from the Image store, kindly try again later"); + } + } + s_logger.debug("PEARL - stg capacities computed"); + for (Map.Entry> p : capacities.entrySet()) { + s_logger.debug("PEARL - Datastore : " + p.getKey() + " free capacity: " + p.getValue().first() + " total capacity: " + p.getValue().second()); + } + return capacities; + } + + + /** + * + * @param storageCapacities Map comprising the metrics(free and total capacities) of the images stores considered + * @return mean standard deviation + */ + private double getStandardDeviation(Map> storageCapacities) { + double[] freeCapacities = 
storageCapacities.values().stream().mapToDouble(x -> ((double) x.first() / x.second())).toArray(); + s_logger.debug("PEARL - free capcitites size :"); + for (double cap : freeCapacities) { + s_logger.debug("PEARL - cap : " + cap); + } + double mean = calculateStorageMean(freeCapacities); + s_logger.debug("PEARL: - mean = " + mean); + return (calculateStorageStandardDeviation(freeCapacities, mean) / mean); + } + + /** + * Sorts the datastores in decreasing order of their free capacities, so as to make + * an informed decision of picking the datastore with maximum free capacity for migration + */ + private List sortDataStores(Map> storageCapacities) { + s_logger.debug("PEARL - storage capacity size: " + storageCapacities.size()); + List>> list = + new LinkedList>>((storageCapacities.entrySet())); + + Collections.sort(list, new Comparator>>() { + @Override + public int compare(Map.Entry> e1, Map.Entry> e2) { + return e2.getValue().first() > e1.getValue().first() ? 1 : -1; + } + }); + HashMap> temp = new LinkedHashMap<>(); + for (Map.Entry> value : list) { + s_logger.debug("PEARL - list : " + value.getKey() + " pair val: " + value.getValue()); + temp.put(value.getKey(), value.getValue()); + } + + s_logger.debug("PEARL - temp size: " + temp.size()); + for (Map.Entry> e : temp.entrySet()) { + s_logger.debug("PEARL - storeID : " + e.getKey() + " pair val: " + e.getValue()); + } + return new ArrayList<>(temp.keySet()); + } + + /** + * + * @param storageCapacities Map comprising the metrics(free and total capacities) of the image stores considered + * @param srcDsId source image store ID from where data is to be migrated + * @param destDsId destination image store ID to where data is to be migrated + * @param fileSize size of the data object to be migrated so as to recompute the storage metrics + * @return a map - Key: Datastore ID ; Value: Pair + */ + private Map> assumeMigrate(Map> storageCapacities, Long srcDsId, Long destDsId, Long fileSize) { + Map> modifiedCapacities
= new Hashtable<>(); + modifiedCapacities.putAll(storageCapacities); + Pair srcDSMetrics = storageCapacities.get(srcDsId); + Pair destDSMetrics = storageCapacities.get(destDsId); + modifiedCapacities.put(srcDsId, new Pair<>(srcDSMetrics.first() + fileSize, srcDSMetrics.second())); + modifiedCapacities.put(destDsId, new Pair<>(destDSMetrics.first() - fileSize, destDSMetrics.second())); + return modifiedCapacities; + } + + private Long getFileSize(DataObject file, Map, Long>> snapshotChain) { + Long size = file.getSize(); + Pair, Long> chain = snapshotChain.get(file); + if (file instanceof SnapshotInfo && chain.first() != null) { + size = chain.second(); + } + return size; + } + + /** + * This function determines if migration should in fact take place or not : + * - For Balanced migration - the mean standard deviation is calculated before and after (supposed) migration + * and a decision is made if migration is afterall beneficial + * - For Complete migration - We check if the destination image store has sufficient capacity i.e., below the threshold of (90%) + * and then proceed with the migration + * @param chosenFile file for migration + * @param srcDatastoreId source image store ID from where data is to be migrated + * @param destDatastoreId destination image store ID to where data is to be migrated + * @param storageCapacities Map comprising the metrics(free and total capacities) of the images stores considered + * @param snapshotChains Map containing details of chain of snapshots and their cumulative size + * @param migrationPolicy determines whether a "Balance" or "Complete" migration operation is to be performed + * @return + */ + private boolean shouldMigrate(DataObject chosenFile, Long srcDatastoreId, Long destDatastoreId, Map> storageCapacities, + Map, Long>> snapshotChains, String migrationPolicy) { + //private boolean shouldMigrate(DummyObject chosenFile, Long srcDatastoreId, Long destDatastoreId, Map> storageCapacities, String policy) { + return true; 
+// if (migrationPolicy == MigrationPolicy.Balance.toString()) { +// double meanStdDevCurrent = getStandardDeviation(storageCapacities); +// +// s_logger.debug("PEARL - meanstd deviation before migration = " + meanStdDevCurrent); +// Long fileSize = getFileSize(chosenFile, snapshotChains) +// Map> proposedCapacities = assumeMigrate(storageCapacities, srcDatastoreId, destDatastoreId, fileSize); +// double meanStdDevAfter = getStandardDeviation(proposedCapacities); +// +// // calculateStorageImbalanceAfterSupposedMigration(stores, storesToCapacityMap, meanStdDeviation, fileSize); +// +// s_logger.debug("PEARL - meanstd deviation after migration = " + meanStdDevAfter); +// +//// if (meanStdDevAfter > meanStdDevCurrent) { +//// s_logger.debug("PEARL - migrating the file doesn't prove to be beneficial, skipping migration"); +//// return false; +//// } +// +// Double threshold = ImageStoreImbalanceThreshold.value(); +// if (meanStdDevCurrent > threshold && storageCapacityBelowThreshold(storageCapacities, destDatastoreId)) { +// return true; +// } +// } else { +// if (storageCapacityBelowThreshold(storageCapacities, destDatastoreId)) { +// return true; +// } +// } +// return false; + } + + private boolean storageCapacityBelowThreshold(Map> storageCapacities, Long destStoreId) { + Pair imageStoreCapacity = storageCapacities.get(destStoreId); + if (imageStoreCapacity != null && (imageStoreCapacity.first() / (imageStoreCapacity.second() * 1.0)) <= imageStoreCapacityThreshold) { + s_logger.debug("PEARL - image store has sufficient capacity to proceed with migration of file"); + return true; + } + s_logger.debug("PEARL - image store capacity threshold exceeded, migration not possible"); + return false; + } + + private double calculateStorageMean(double[] storageMetrics) { + return new Mean().evaluate(storageMetrics); + } + + private double calculateStorageStandardDeviation(double[] metricValues, double mean) { + StandardDeviation standardDeviation = new 
StandardDeviation(false); + return standardDeviation.evaluate(metricValues, mean); + } + + /** This function verifies if the given image store comprises of data objects that are not in either the "Ready" or + * "Allocated" state - in such a case, if the migration policy is complete, the migration is terminated + */ + private boolean filesReady(Long srcDataStoreId) { + String[] validStates = new String[]{"Ready", "Allocated"}; + boolean isReady = true; + List templates = templateDataStoreDao.listByStoreId(srcDataStoreId); + for (TemplateDataStoreVO template : templates) { + isReady &= (Arrays.asList(validStates).contains(template.getState().toString())); + } + List snapshots = snapshotDataStoreDao.listByStoreId(srcDataStoreId, DataStoreRole.Image); + for (SnapshotDataStoreVO snapshot : snapshots) { + isReady &= (Arrays.asList(validStates).contains(snapshot.getState().toString())); + } + List volumes = volumeDataStoreDao.listByStoreId(srcDataStoreId); + for (VolumeDataStoreVO volume : volumes) { + isReady &= (Arrays.asList(validStates).contains(volume.getState().toString())); + } + return isReady; + } + + // Gets list of all valid templates, i.e, templates in "Ready" state for migration + private List getAllValidTemplates(DataStore srcDataStore) { + + List files = new LinkedList<>(); + List templates = templateDataStoreDao.listByStoreId(srcDataStore.getId()); + for (TemplateDataStoreVO template : templates) { + VMTemplateVO templateVO = templateDao.findById(template.getTemplateId()); + if (template.getState() == ObjectInDataStoreStateMachine.State.Ready && !templateVO.isPublicTemplate() && templateVO.getTemplateType() != Storage.TemplateType.SYSTEM) { + files.add(templateFactory.getTemplate(template.getTemplateId(), srcDataStore)); + } + } + return files; + } + + /** Returns parent snapshots and snapshots that do not have any children; snapshotChains comprises of the snapshot chain info + * for each parent snapshot and the cumulative size of the chain - this is done 
to ensure that all the snapshots in a chain + * are migrated to the same datastore + */ + private List getAllValidSnapshotChains(DataStore srcDataStore, Map, Long>> snapshotChains) { + List files = new LinkedList<>(); + List snapshots = snapshotDataStoreDao.listByStoreId(srcDataStore.getId(), DataStoreRole.Image); + for (SnapshotDataStoreVO snapshot : snapshots) { + SnapshotVO snapshotVO = snapshotDao.findById(snapshot.getSnapshotId()); + if (snapshot.getState() == ObjectInDataStoreStateMachine.State.Ready && snapshot.getParentSnapshotId() == 0 ) { + SnapshotInfo snap = snapshotFactory.getSnapshot(snapshotVO.getSnapshotId(), DataStoreRole.Image); + files.add(snap); + } + } + + for (SnapshotInfo parent : files) { + List chain = new ArrayList<>(); + chain.add(parent); + for (int i =0; i< chain.size(); i++) { + SnapshotInfo child = chain.get(i); + List children = child.getChildren(); + if (children != null) { + chain.addAll(children); + } + } + snapshotChains.put(parent, new Pair, Long>(chain, getSizeForChain(chain))); + } + //Log + for (DataObject snap: snapshotChains.keySet()) { + s_logger.debug("PEARL - parent = "+snap); + List chain = snapshotChains.get(snap).first(); + s_logger.debug("PEARL - chain: "); + for (int i =0;i "); + } + } + return (List) (List) files; + } + + // Finds the cumulative file size for all data objects in the chain + private Long getSizeForChain(List chain) { + Long size = 0L; + for (SnapshotInfo snapshot : chain) { + size += snapshot.getSize(); + } + return size; + } + + // Returns a list of volumes that are in "Ready" state + private List getAllValidVolumes(DataStore srcDataStore) { + List files = new LinkedList<>(); + List volumes = volumeDataStoreDao.listByStoreId(srcDataStore.getId()); + for (VolumeDataStoreVO volume : volumes) { + if (volume.getState() == ObjectInDataStoreStateMachine.State.Ready) { + files.add(volumeFactory.getVolume(volume.getVolumeId(), srcDataStore)); + } + } + return files; + } + + /** Returns the count of active 
SSVMs - SSVM with agents in connected state, so as to dynamically increase the thread pool + * size when SSVMs scale + */ + private int activeSSVMCount(DataStore dataStore) { + long datacenterId = dataStore.getScope().getScopeId(); + List ssvms = + secStorageVmDao.getSecStorageVmListInStates(SecondaryStorageVm.Role.templateProcessor, datacenterId, VirtualMachine.State.Running, VirtualMachine.State.Migrating); + int activeSSVMs = 0; + for (SecondaryStorageVmVO vm : ssvms) { + String name = "s-"+vm.getId()+"-VM"; + HostVO ssHost = hostDao.findByName(name); + if (ssHost != null) { + if (ssHost.getState() == Status.Up) { + activeSSVMs++; + } + } + } + return activeSSVMs; + } + + private class MigrateDataTask implements Callable> { + private DataObject file; + private DataStore srcDataStore; + private DataStore destDataStore; + private Map, Long>> snapshotChain; + public MigrateDataTask(DataObject file, DataStore srcDataStore, DataStore destDataStore) { + this.file = file; + this.srcDataStore = srcDataStore; + this.destDataStore = destDataStore; + } + + public void setSnapshotChains(Map, Long>> snapshotChain) { + this.snapshotChain = snapshotChain; + } + + public Map, Long>> getSnapshotChain() { + return snapshotChain; + } + public DataObject getFile() { + return file; + } + + @Override + public AsyncCallFuture call() throws Exception { + s_logger.debug("PEARL - running migration TASK"); + return secStgSrv.migrateData(file, srcDataStore, destDataStore, snapshotChain); + } + } +} diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 9e2168e0bfd0..6a0a40f24e19 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -141,7 +141,6 @@ 
import com.cloud.vm.dao.UserVmDao; public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrationService, Configurable { - public enum UserVmCloneType { full, linked } diff --git a/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml b/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml index 3ded395bb66f..da3e377b5a28 100644 --- a/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml +++ b/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml @@ -44,6 +44,9 @@ value="#{storagePoolAllocatorsRegistry.registered}" /> + + , StateDao listByHostCapability(Host.Type type, Long clusterId, Long podId, long dcId, String hostCapabilty); List listByClusterAndHypervisorType(long clusterId, HypervisorType hypervisorType); + + HostVO findByName(String name); } diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java index ec4573faf226..5f60124d7c2c 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDaoImpl.java @@ -1265,6 +1265,13 @@ public List listByClusterAndHypervisorType(long clusterId, HypervisorTyp return listBy(sc); } + @Override + public HostVO findByName(String name) { + SearchCriteria sc = NameSearch.create(); + sc.setParameters("name", name); + return findOneBy(sc); + } + private ResultSet executeSqlGetResultsetForMethodFindHostInZoneToExecuteCommand(HypervisorType hypervisorType, long zoneId, TransactionLegacy tx, String sql) throws SQLException { PreparedStatement pstmt = tx.prepareAutoCloseStatement(sql); pstmt.setString(1, Objects.toString(hypervisorType)); diff --git a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDao.java 
b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDao.java index fb57563131e6..98fc8c8687b8 100644 --- a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDao.java +++ b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDao.java @@ -22,4 +22,5 @@ public interface CommandExecLogDao extends GenericDao { public void expungeExpiredRecords(Date cutTime); + public Integer getCopyCmdCountForSSVM(Long id); } diff --git a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java index ac438b0d1173..f5cfc1ed2839 100644 --- a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java @@ -17,7 +17,7 @@ package com.cloud.secstorage; import java.util.Date; - +import java.util.List; import org.springframework.stereotype.Component; @@ -30,11 +30,17 @@ public class CommandExecLogDaoImpl extends GenericDaoBase implements CommandExecLogDao { protected final SearchBuilder ExpungeSearch; + protected final SearchBuilder CommandSearch; public CommandExecLogDaoImpl() { ExpungeSearch = createSearchBuilder(); ExpungeSearch.and("created", ExpungeSearch.entity().getCreated(), Op.LT); ExpungeSearch.done(); + + + CommandSearch = createSearchBuilder(); + CommandSearch.and("host_id", CommandSearch.entity().getHostId(), Op.EQ); + CommandSearch.and("command_name", CommandSearch.entity().getCommandName(), Op.EQ); } @Override @@ -43,4 +49,15 @@ public void expungeExpiredRecords(Date cutTime) { sc.setParameters("created", cutTime); expunge(sc); } + + @Override + public Integer getCopyCmdCountForSSVM(Long id) { + SearchCriteria sc = CommandSearch.create(); + sc.setParameters("host_id", id); + sc.setParameters("command_name", "CopyCommand"); + List copyCmds = customSearch(sc, null); + return copyCmds.size(); + } + + } diff --git 
a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java index 1861b21a38ad..9ade4f9d0881 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java @@ -31,6 +31,8 @@ public interface ImageStoreDao extends GenericDao { List findByScope(ZoneScope scope); + List findByScopeExcludingReadOnly(ZoneScope scope); + List findRegionImageStores(); List findImageCacheByScope(ZoneScope scope); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java index 38124ea49e0e..006d31e7df06 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java @@ -91,6 +91,22 @@ public List findByScope(ZoneScope scope) { return listBy(sc); } + @Override + public List findByScopeExcludingReadOnly(ZoneScope scope) { + SearchCriteria sc = createSearchCriteria(); + sc.addAnd("role", SearchCriteria.Op.EQ, DataStoreRole.Image); + if (scope.getScopeId() != null) { + SearchCriteria scc = createSearchCriteria(); + scc.addOr("scope", SearchCriteria.Op.EQ, ScopeType.REGION); + scc.addOr("dcId", SearchCriteria.Op.EQ, scope.getScopeId()); + sc.addAnd("scope", SearchCriteria.Op.SC, scc); + sc.addAnd("readonly", SearchCriteria.Op.EQ, Boolean.FALSE); + } + // we should return all image stores if cross-zone scope is passed + // (scopeId = null) + return listBy(sc); + } + @Override public List findRegionImageStores() { SearchCriteria sc = regionSearch.create(); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java 
b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java index 2c706774a4d8..d24582714868 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreVO.java @@ -74,6 +74,9 @@ public class ImageStoreVO implements ImageStore { @Enumerated(value = EnumType.STRING) private DataStoreRole role; + @Column(name = "readonly") + private boolean readonly = false; + @Column(name = "parent") private String parent; @@ -165,6 +168,14 @@ public Date getCreated() { return created; } + public void setReadonly(boolean readonly) { + this.readonly = readonly; + } + + public boolean isReadonly() { + return readonly; + } + public void setCreated(Date created) { this.created = created; } diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41310to41400.sql b/engine/schema/src/main/resources/META-INF/db/schema-41310to41400.sql index bc0094150fc5..9dd0b39862c4 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-41310to41400.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41310to41400.sql @@ -49,6 +49,33 @@ ALTER TABLE `cloud`.`vm_instance` ADD COLUMN `backup_offering_id` bigint unsigne ALTER TABLE `cloud`.`vm_instance` ADD COLUMN `backup_external_id` varchar(255) DEFAULT NULL COMMENT 'ID of external backup job or container if any'; ALTER TABLE `cloud`.`vm_instance` ADD COLUMN `backup_volumes` text DEFAULT NULL COMMENT 'details of backedup volumes'; +ALTER TABLE `cloud`.`image_store` ADD COLUMN `readonly` boolean DEFAULT false COMMENT 'defines status of image store'; + +ALTER VIEW `cloud`.`image_store_view` AS + select + image_store.id, + image_store.uuid, + image_store.name, + image_store.image_provider_name, + image_store.protocol, + image_store.url, + image_store.scope, + image_store.role, + image_store.readonly, + image_store.removed, + data_center.id data_center_id, + data_center.uuid 
data_center_uuid, + data_center.name data_center_name, + image_store_details.name detail_name, + image_store_details.value detail_value + from + `cloud`.`image_store` + left join + `cloud`.`data_center` ON image_store.data_center_id = data_center.id + left join + `cloud`.`image_store_details` ON image_store_details.store_id = image_store.id; + + CREATE TABLE IF NOT EXISTS `cloud`.`backups` ( `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT, `uuid` varchar(40) NOT NULL UNIQUE, diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java index c2724e648241..f7a5feaad39f 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java @@ -25,12 +25,6 @@ import javax.inject.Inject; -import com.cloud.storage.Volume; -import com.cloud.storage.VolumeVO; -import com.cloud.storage.dao.VolumeDao; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService; import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionStrategy; @@ -39,9 +33,14 @@ import org.apache.cloudstack.engine.subsystem.api.storage.StorageStrategyFactory; import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.agent.api.to.VirtualMachineTO; import com.cloud.host.Host; +import com.cloud.storage.Volume; +import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.VolumeDao; import com.cloud.utils.StringUtils; import 
com.cloud.utils.exception.CloudRuntimeException; @@ -60,7 +59,7 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As if (srcData.getDataStore() == null || destData.getDataStore() == null) { throw new CloudRuntimeException("can't find data store"); } - + LOGGER.debug("PEARL - getting copyasync driver!!"); if (srcData.getDataStore().getDriver().canCopy(srcData, destData)) { srcData.getDataStore().getDriver().copyAsync(srcData, destData, callback); return; diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java new file mode 100644 index 000000000000..cd4e88eddc71 --- /dev/null +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java @@ -0,0 +1,165 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.storage.image; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.ExecutionException; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; +import org.apache.cloudstack.engine.subsystem.api.storage.DataMotionService; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageService; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.framework.async.AsyncCallFuture; +import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; +import org.apache.cloudstack.framework.async.AsyncCompletionCallback; +import org.apache.cloudstack.framework.async.AsyncRpcContext; +import org.apache.cloudstack.storage.command.CopyCmdAnswer; +import org.apache.log4j.Logger; + +import com.cloud.secstorage.CommandExecLogDao; +import com.cloud.utils.Pair; + +public class StorageServiceImpl implements SecondaryStorageService { + + private static final Logger s_logger = Logger.getLogger(StorageServiceImpl.class); + + @Inject + DataMotionService motionSrv; + @Inject + CommandExecLogDao _cmdExecLogDao; + + private class MigrateDataContext extends AsyncRpcContext { + final DataObject srcData; + final DataObject destData; + final AsyncCallFuture future; + + /** + * @param callback + */ + public MigrateDataContext(AsyncCompletionCallback callback, AsyncCallFuture future, DataObject srcData, DataObject destData, DataStore destStore) { + super(callback); + this.srcData = srcData; + this.destData = destData; + this.future = future; + } + } + + @Override + public AsyncCallFuture migrateData(DataObject srcDataObject, DataStore srcDatastore, DataStore destDatastore, Map, Long>> 
snapshotChain) { + AsyncCallFuture future = new AsyncCallFuture(); + DataObjectResult res = new DataObjectResult(srcDataObject); + DataObject destDataObject = null; + try { + if (srcDataObject instanceof SnapshotInfo && snapshotChain.keySet().contains(srcDataObject)) { + s_logger.debug("PEARL - snapshot instance with a chain of snaps: size"+ snapshotChain.get(srcDataObject).first().size()); + for (SnapshotInfo snapshotInfo : snapshotChain.get(srcDataObject).first()) { + destDataObject = destDatastore.create(snapshotInfo); + snapshotInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); + destDataObject.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); + // migrateJob(future, snapshotInfo, destDataObject, destDatastore); + s_logger.debug("PEARL - snap name: "+ snapshotInfo.getName()); + MigrateDataContext context = new MigrateDataContext(null, future, snapshotInfo, destDataObject, destDatastore); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().migrateDataCallBack(null, null)).setContext(context); + s_logger.debug(snapshotInfo.getDataStore().getTO().toString()); + motionSrv.copyAsync(snapshotInfo, destDataObject, caller); + } + } else { + s_logger.debug("PEARL - not a snapshot instance"); + destDataObject = destDatastore.create(srcDataObject); + srcDataObject.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); + destDataObject.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); + //migrateJob(future, srcDataObject, destDataObject, destDatastore); + MigrateDataContext context = new MigrateDataContext(null, future, srcDataObject, destDataObject, destDatastore); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().migrateDataCallBack(null, null)).setContext(context); + s_logger.debug(srcDataObject.getDataStore().getTO().toString()); + motionSrv.copyAsync(srcDataObject, 
destDataObject, caller); + } + } catch (Exception e) { + s_logger.debug("Failed to copy Data", e); + if (destDataObject != null) { + destDataObject.getDataStore().delete(destDataObject); + srcDataObject.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); + } + res.setResult(e.toString()); + future.complete(res); + } + return future; + } + +// protected void migrateJob(AsyncCallFuture future, DataObject srcDataObject, DataObject destDataObject, DataStore destDatastore) throws ExecutionException, InterruptedException { +// s_logger.debug("PEARL - in migrateJob() "); +// MigrateDataContext context = new MigrateDataContext(null, future, srcDataObject, destDataObject, destDatastore); +// AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); +// caller.setCallback(caller.getTarget().migrateDataCallBack(null, null)).setContext(context); +// s_logger.debug(srcDataObject.getDataStore().getTO().toString()); +// motionSrv.copyAsync(srcDataObject, destDataObject, caller); +// } + + /** + * Callback function to handle state change of source and destination data objects based on the success or failure of the migrate task + */ + protected Void migrateDataCallBack(AsyncCallbackDispatcher callback, MigrateDataContext context) throws ExecutionException, InterruptedException { + s_logger.debug("PEARL - completed transfer - @ migrate callback"); + DataObject srcData = context.srcData; + DataObject destData = context.destData; + s_logger.debug("PEARL - src data = "+srcData.getUri()); + s_logger.debug("PEARL - dest data = "+ destData.getUri()); + CopyCommandResult result = callback.getResult(); + AsyncCallFuture future = context.future; + DataObjectResult res = new DataObjectResult(srcData); + CopyCmdAnswer answer = (CopyCmdAnswer) result.getAnswer(); + try { + if (!answer.getResult()) { + s_logger.debug("PEARL - migration failed"); + res.setResult(result.getResult()); + srcData.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); + 
destData.processEvent(ObjectInDataStoreStateMachine.Event.MigrationFailed); + destData.processEvent(ObjectInDataStoreStateMachine.Event.DestroyRequested); + + if (destData != null) { + destData.getDataStore().delete(destData); + } + + } else { + s_logger.debug("PEARL - migration succeeded"); + destData.processEvent(ObjectInDataStoreStateMachine.Event.OperationSuccessed, answer); + s_logger.debug("PEARL - Deleting source data"); + srcData.getDataStore().delete(srcData); + } + _cmdExecLogDao.expunge(Long.parseLong(answer.getContextParam("cmd"))); + future.complete(res); + } catch (Exception e) { + s_logger.error("Failed to process migrate data callback", e); + res.setResult(e.toString()); + _cmdExecLogDao.expunge(Long.parseLong(answer.getContextParam("cmd"))); + future.complete(res); + } + return null; + } + +} + + diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java index 8343a74d60b7..043af9a49ac9 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateDataFactoryImpl.java @@ -18,21 +18,17 @@ */ package org.apache.cloudstack.storage.image; -import com.cloud.host.HostVO; -import com.cloud.host.dao.HostDao; import java.util.ArrayList; import java.util.List; import javax.inject.Inject; -import com.cloud.hypervisor.Hypervisor; -import com.cloud.utils.exception.CloudRuntimeException; import org.apache.cloudstack.direct.download.DirectDownloadManager; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; import 
org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao; import org.apache.cloudstack.storage.datastore.db.StoragePoolVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; @@ -41,11 +37,15 @@ import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.host.HostVO; +import com.cloud.host.dao.HostDao; +import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.DataStoreRole; import com.cloud.storage.VMTemplateStoragePoolVO; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplatePoolDao; +import com.cloud.utils.exception.CloudRuntimeException; @Component public class TemplateDataFactoryImpl implements TemplateDataFactory { @@ -230,5 +230,4 @@ public boolean isTemplateMarkedForDirectDownload(long templateId) { VMTemplateVO templateVO = imageDataDao.findById(templateId); return templateVO.isDirectDownload(); } - } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index edf824403e17..57148613ba58 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -253,7 +253,8 @@ public void downloadBootstrapSysTemplate(DataStore store) { @Override public void handleSysTemplateDownload(HypervisorType hostHyper, Long dcId) { Set toBeDownloaded = new HashSet(); - List stores = _storeMgr.getImageStoresByScope(new ZoneScope(dcId)); + //List stores = _storeMgr.getImageStoresByScope(new ZoneScope(dcId)); + List stores = _storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(dcId)); if 
(stores == null || stores.isEmpty()) { return; } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java index 80e5b38f1f76..90089757a927 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java @@ -118,6 +118,16 @@ public List listImageStoresByScope(ZoneScope scope) { return imageStores; } + @Override + public List listImageStoresByScopeExcludingReadOnly(ZoneScope scope) { + List stores = dataStoreDao.findByScopeExcludingReadOnly(scope); + List imageStores = new ArrayList(); + for (ImageStoreVO store : stores) { + imageStores.add(getImageStore(store.getId())); + } + return imageStores; + } + @Override public List listImageStoreByProvider(String provider) { List stores = dataStoreDao.findByProvider(provider); @@ -178,6 +188,31 @@ public int compare(DataStore store1, DataStore store2) { return null; } + @Override + public List orderImageStoresOnFreeCapacity(List imageStores) { + List stores = new ArrayList<>(); + if (imageStores.size() > 1) { + imageStores.sort(new Comparator() { // Sort data stores based on free capacity + @Override + public int compare(DataStore store1, DataStore store2) { + return Long.compare(_statsCollector.imageStoreCurrentFreeCapacity(store1), + _statsCollector.imageStoreCurrentFreeCapacity(store2)); + } + }); + for (DataStore imageStore : imageStores) { + // Return image store if used percentage is less then threshold value i.e. 90%. 
+ if (_statsCollector.imageStoreHasEnoughCapacity(imageStore)) { + stores.add(imageStore); + } + } + } else if (imageStores.size() == 1) { + if (_statsCollector.imageStoreHasEnoughCapacity(imageStores.get(0))) { + stores.add(imageStores.get(0)); + } + } + return stores; + } + @Override public List listImageStoresWithFreeCapacity(List imageStores) { List stores = new ArrayList<>(); diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java index 25f27a23c1ed..86030f226f63 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/store/TemplateObject.java @@ -208,7 +208,9 @@ public void processEvent(ObjectInDataStoreStateMachine.Event event, Answer answe CopyCmdAnswer cpyAnswer = (CopyCmdAnswer)answer; TemplateObjectTO newTemplate = (TemplateObjectTO)cpyAnswer.getNewData(); TemplateDataStoreVO templateStoreRef = templateStoreDao.findByStoreTemplate(getDataStore().getId(), getId()); - templateStoreRef.setInstallPath(newTemplate.getPath()); + if (newTemplate.getPath() != null) { + templateStoreRef.setInstallPath(newTemplate.getPath()); + } templateStoreRef.setDownloadPercent(100); templateStoreRef.setDownloadState(Status.DOWNLOADED); templateStoreRef.setSize(newTemplate.getSize()); diff --git a/engine/storage/image/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-image-core-context.xml b/engine/storage/image/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-image-core-context.xml index 5c7b05b756a1..a280e13a8cfb 100644 --- a/engine/storage/image/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-image-core-context.xml +++ b/engine/storage/image/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-image-core-context.xml @@ -34,6 +34,10 
@@ + + + + diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java index 65d6fa52e667..df5bc8174d0f 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java @@ -18,12 +18,12 @@ */ package org.apache.cloudstack.storage.snapshot; +import java.util.ArrayList; import java.util.Date; +import java.util.List; import javax.inject.Inject; -import org.apache.log4j.Logger; - import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; @@ -40,6 +40,7 @@ import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.to.SnapshotObjectTO; +import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.to.DataObjectType; @@ -129,6 +130,24 @@ public SnapshotInfo getChild() { return snapshotFactory.getSnapshot(vo.getId(), store); } + @Override + public List getChildren() { + QueryBuilder sc = QueryBuilder.create(SnapshotDataStoreVO.class); + sc.and(sc.entity().getDataStoreId(), Op.EQ, store.getId()); + sc.and(sc.entity().getRole(), Op.EQ, store.getRole()); + sc.and(sc.entity().getState(), Op.NIN, State.Destroying, State.Destroyed, State.Error); + sc.and(sc.entity().getParentSnapshotId(), Op.EQ, getId()); + List vos = sc.list(); + if (vos == null) { + return null; + } + List children = new ArrayList<>(); + for (SnapshotDataStoreVO vo : vos ) { + children.add(snapshotFactory.getSnapshot(vo.getId(), store)); + } + return children; + } + @Override public boolean 
isRevertable() { SnapshotStrategy snapshotStrategy = storageStrategyFactory.getSnapshotStrategy(snapshot, SnapshotOperation.REVERT); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java index 51421e4cd3dd..ceb26b38c2ef 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java @@ -72,6 +72,11 @@ public List getImageStoresByScope(ZoneScope scope) { return imageDataStoreMgr.listImageStoresByScope(scope); } + @Override + public List getImageStoresByScopeExcludingReadOnly(ZoneScope scope) { + return imageDataStoreMgr.listImageStoresByScopeExcludingReadOnly(scope); + } + @Override public DataStore getRandomImageStore(long zoneId) { List stores = getImageStoresByScope(new ZoneScope(zoneId)); @@ -81,18 +86,36 @@ public DataStore getRandomImageStore(long zoneId) { return imageDataStoreMgr.getRandomImageStore(stores); } + @Override + public DataStore getRandomUsableImageStore(long zoneId) { + List stores = getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); + if (stores == null || stores.size() == 0) { + return null; + } + return imageDataStoreMgr.getRandomImageStore(stores); + } + @Override public DataStore getImageStoreWithFreeCapacity(long zoneId) { - List stores = getImageStoresByScope(new ZoneScope(zoneId)); + //List stores = getImageStoresByScope(new ZoneScope(zoneId)); + List stores = getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); if (stores == null || stores.size() == 0) { return null; } return imageDataStoreMgr.getImageStoreWithFreeCapacity(stores); } + @Override + public DataStore getImageStoreWithFreeCapacity(List imageStores) { + if (imageStores.isEmpty()) { + return null; + } + return 
imageDataStoreMgr.getImageStoreWithFreeCapacity(imageStores); + } + @Override public List listImageStoresWithFreeCapacity(long zoneId) { - List stores = getImageStoresByScope(new ZoneScope(zoneId)); + List stores = getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); if (stores == null || stores.size() == 0) { return null; } @@ -107,6 +130,11 @@ public boolean isRegionStore(DataStore store) { return false; } + @Override + public List orderImageStoresOnFreeCapacity(List stores) { + return imageDataStoreMgr.orderImageStoresOnFreeCapacity(stores); + } + @Override public DataStore getPrimaryDataStore(long storeId) { return primaryStoreMgr.getPrimaryDataStore(storeId); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java index 062e89a4247f..2a12feb5c534 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java @@ -104,6 +104,14 @@ public ObjectInDataStoreManagerImpl() { // TODO: further investigate why an extra event is sent when it is // alreay Ready for DownloadListener stateMachines.addTransition(State.Ready, Event.OperationSuccessed, State.Ready); + // State transitions for data object migration + stateMachines.addTransition(State.Ready, Event.MigrationRequested, State.Migrating); + stateMachines.addTransition(State.Ready, Event.CopyRequested, State.Copying); + stateMachines.addTransition(State.Allocated, Event.MigrationRequested, State.Migrating); + stateMachines.addTransition(State.Migrating, Event.MigrationFailed, State.Failed); + stateMachines.addTransition(State.Migrating, Event.MigrationSucceeded, State.Destroyed); + stateMachines.addTransition(State.Migrating, Event.OperationSuccessed, State.Ready); + 
stateMachines.addTransition(State.Migrating, Event.OperationFailed, State.Ready); } @Override diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java index 6e8bdaf4b8c1..09b4b1ab3853 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/endpoint/DefaultEndPointSelector.java @@ -264,6 +264,27 @@ protected EndPoint findEndpointForImageStorage(DataStore store) { return RemoteHostEndPoint.getHypervisorHostEndPoint(host); } + @Override + public List findAllEndpointsForScope(DataStore store) { + Long dcId = null; + Scope storeScope = store.getScope(); + if (storeScope.getScopeType() == ScopeType.ZONE) { + dcId = storeScope.getScopeId(); + } + // find ssvm that can be used to download data to store. For zone-wide + // image store, use SSVM for that zone. 
For region-wide store, + // we can arbitrarily pick one ssvm to do that task + List ssAHosts = listUpAndConnectingSecondaryStorageVmHost(dcId); + if (ssAHosts == null || ssAHosts.isEmpty()) { + return null; + } + List endPoints = new ArrayList(); + for (HostVO host: ssAHosts) { + endPoints.add(RemoteHostEndPoint.getHypervisorHostEndPoint(host)); + } + return endPoints; + } + private List listUpAndConnectingSecondaryStorageVmHost(Long dcId) { QueryBuilder sc = QueryBuilder.create(HostVO.class); if (dcId != null) { @@ -333,7 +354,7 @@ public EndPoint select(DataStore store) { } } - private EndPoint getEndPointFromHostId(Long hostId) { + public EndPoint getEndPointFromHostId(Long hostId) { HostVO host = hostDao.findById(hostId); return RemoteHostEndPoint.getHypervisorHostEndPoint(host); } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java index dec9b76dbc84..3e0b5fb70b8d 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java @@ -20,6 +20,9 @@ import java.net.URI; import java.net.URISyntaxException; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; import java.util.ArrayList; import java.util.Date; import java.util.List; @@ -27,14 +30,6 @@ import javax.inject.Inject; -import com.cloud.agent.api.storage.OVFPropertyTO; -import com.cloud.storage.Upload; -import com.cloud.storage.dao.TemplateOVFPropertiesDao; -import com.cloud.storage.TemplateOVFPropertyVO; -import com.cloud.utils.crypt.DBEncryptionUtil; -import org.apache.commons.collections.CollectionUtils; -import org.apache.log4j.Logger; - import org.apache.cloudstack.engine.subsystem.api.storage.CopyCommandResult; import 
org.apache.cloudstack.engine.subsystem.api.storage.CreateCmdResult; import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; @@ -47,24 +42,38 @@ import org.apache.cloudstack.framework.async.AsyncRpcContext; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.command.CommandResult; +import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.DeleteCommand; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; import org.apache.cloudstack.storage.endpoint.DefaultEndPointSelector; +import org.apache.commons.collections.CollectionUtils; +import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.CreateDatadiskTemplateCommand; import com.cloud.agent.api.storage.DownloadAnswer; import com.cloud.agent.api.storage.GetDatadisksAnswer; import com.cloud.agent.api.storage.GetDatadisksCommand; +import com.cloud.agent.api.storage.OVFPropertyTO; import com.cloud.agent.api.to.DataObjectType; import com.cloud.agent.api.to.DataTO; +import com.cloud.agent.api.to.DatadiskTO; +import com.cloud.agent.api.to.NfsTO; import com.cloud.alert.AlertManager; +import com.cloud.configuration.Config; +import com.cloud.host.dao.HostDao; +import com.cloud.secstorage.CommandExecLogDao; +import com.cloud.secstorage.CommandExecLogVO; +import com.cloud.storage.StorageManager; +import com.cloud.storage.TemplateOVFPropertyVO; +import com.cloud.storage.Upload; import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateVO; import com.cloud.storage.VolumeVO; +import com.cloud.storage.dao.TemplateOVFPropertiesDao; import com.cloud.storage.dao.VMTemplateDao; import 
com.cloud.storage.dao.VMTemplateDetailsDao; import com.cloud.storage.dao.VMTemplateZoneDao; @@ -72,9 +81,12 @@ import com.cloud.storage.download.DownloadMonitor; import com.cloud.user.ResourceLimitService; import com.cloud.user.dao.AccountDao; -import com.cloud.agent.api.to.DatadiskTO; -import com.cloud.utils.net.Proxy; +import com.cloud.utils.NumbersUtil; +import com.cloud.utils.crypt.DBEncryptionUtil; +import com.cloud.utils.db.TransactionLegacy; import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.utils.net.Proxy; +import com.cloud.vm.dao.SecondaryStorageVmDao; public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { private static final Logger s_logger = Logger.getLogger(BaseImageStoreDriverImpl.class); @@ -106,6 +118,14 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { ResourceLimitService _resourceLimitMgr; @Inject TemplateOVFPropertiesDao templateOvfPropertiesDao; + @Inject + HostDao hostDao; + @Inject + CommandExecLogDao _cmdExecLogDao; + @Inject + StorageManager storageMgr; + @Inject + protected SecondaryStorageVmDao _secStorageVmDao; protected String _proxy = null; @@ -333,10 +353,64 @@ public void deleteAsync(DataStore dataStore, DataObject data, AsyncCompletionCal @Override public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCallback callback) { + if (!canCopy(srcdata, destData)) { + return; + } + + if ((srcdata.getType() == DataObjectType.TEMPLATE && destData.getType() == DataObjectType.TEMPLATE) || + (srcdata.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.SNAPSHOT) || + (srcdata.getType() == DataObjectType.VOLUME && destData.getType() == DataObjectType.VOLUME)) { + + int nMaxExecutionMinutes = NumbersUtil.parseInt(configDao.getValue(Config.SecStorageCmdExecutionTimeMax.key()), 30); + int maxConcurrentCopyOpsPerSSVM = NumbersUtil.parseInt(configDao.getValue(Config.SecStorageCopyCmdMaxSessions.key()), 2); + CopyCommand cmd = new 
CopyCommand(srcdata.getTO(), destData.getTO(), nMaxExecutionMinutes * 60 * 1000, true); + Answer answer = null; + + // Select host endpoint such that the load is balanced out + List eps = _epSelector.findAllEndpointsForScope(srcdata.getDataStore()); + if (eps.isEmpty()) { + String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; + s_logger.error(errMsg); + answer = new Answer(cmd, false, errMsg); + } else { + boolean sent = false; + // Find the first endpoint to which the command can be sent to + for (EndPoint ep : eps) { + s_logger.debug("PEARL - number of cmds running on "+ep.getId()+" is: "+getCopyCmdsCountToSpecificSSVM(ep.getId())); + if (getCopyCmdsCountToSpecificSSVM(ep.getId()) >= maxConcurrentCopyOpsPerSSVM) { + continue; + } + + CommandExecLogVO execLog = new CommandExecLogVO(ep.getId(), _secStorageVmDao.findByInstanceName(hostDao.findById(ep.getId()).getName()).getId(), cmd.getClass().getSimpleName(), 1); + Long cmdExecId = _cmdExecLogDao.persist(execLog).getId(); + answer = ep.sendMessage(cmd); + answer.setContextParam("cmd", cmdExecId.toString()); + sent = true; + break; + } + // If both SSVMs are pre-occupied with tasks, choose the SSVM with least migrate jobs + if (!sent) { + // Picking endpoint with least number of copy commands running on it + Long epId = ssvmWithLeastMigrateJobs(); + s_logger.debug("PEARL - edpoint : "+ epId); + EndPoint endPoint = _defaultEpSelector.getEndPointFromHostId(epId); + CommandExecLogVO execLog = new CommandExecLogVO(epId, _secStorageVmDao.findByInstanceName(hostDao.findById(epId).getName()).getId(), cmd.getClass().getSimpleName(), 1); + Long cmdExecId = _cmdExecLogDao.persist(execLog).getId(); + answer = endPoint.sendMessage(cmd); + answer.setContextParam("cmd", cmdExecId.toString()); + } + } + + CopyCommandResult result = new CopyCommandResult("", answer); + callback.complete(result); + } } @Override public boolean canCopy(DataObject srcData, DataObject destData) { + if 
(srcData.getDataStore().getTO() instanceof NfsTO && destData.getDataStore().getTO() instanceof NfsTO) { + return true; + } return false; } @@ -399,4 +473,26 @@ public Void createDataDiskTemplateAsync(TemplateInfo dataDiskTemplate, String pa callback.complete(result); return null; } + + private Integer getCopyCmdsCountToSpecificSSVM(Long ssvmId) { + return _cmdExecLogDao.getCopyCmdCountForSSVM(ssvmId); + } + + private Long ssvmWithLeastMigrateJobs() { + s_logger.debug("PEARL - picking ssvm from the pool with least commands running on it"); + String query = "select host_id, count(*) from cmd_exec_log group by host_id order by 2 limit 1;"; + TransactionLegacy txn = TransactionLegacy.currentTxn(); + + Long epId = null; + PreparedStatement pstmt = null; + try { + pstmt = txn.prepareAutoCloseStatement(query); + ResultSet rs = pstmt.executeQuery(); + rs.absolute(1); + epId = (long) rs.getInt(1); + } catch (SQLException e) { + s_logger.debug("SQLException caught", e); + } + return epId; + } } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/datastore/ImageStoreProviderManager.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/datastore/ImageStoreProviderManager.java index 01f2100f77f1..cb46d480f291 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/datastore/ImageStoreProviderManager.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/datastore/ImageStoreProviderManager.java @@ -36,6 +36,8 @@ public interface ImageStoreProviderManager { List listImageStoresByScope(ZoneScope scope); + List listImageStoresByScopeExcludingReadOnly(ZoneScope scope); + List listImageStoreByProvider(String provider); List listImageCacheStores(Scope scope); @@ -76,4 +78,11 @@ public interface ImageStoreProviderManager { * @return the list of DataStore which have free capacity */ List listImageStoresWithFreeCapacity(List imageStores); + + /** + * Returns the provided list of Datastores in 
descending order of their free capacity + * @param imageStores list of image stores that need to be arranged + * @return sorted list + */ + List orderImageStoresOnFreeCapacity(List imageStores); } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java index 2372e8444cc5..7572131353cc 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java @@ -299,7 +299,8 @@ public List listByTemplateStoreDownloadStatus(long template @Override public List listByTemplateZoneDownloadStatus(long templateId, Long zoneId, Status... status) { // get all elgible image stores - List imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); + //List imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); + List imgStores = _storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); if (imgStores != null) { List result = new ArrayList(); for (DataStore store : imgStores) { @@ -326,7 +327,8 @@ public void removeByTemplateStore(long templateId, long imageStoreId) { @Override public TemplateDataStoreVO findByTemplateZoneDownloadStatus(long templateId, Long zoneId, Status... 
status) { // get all elgible image stores - List imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); + //List imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); + List imgStores = _storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); if (imgStores != null) { for (DataStore store : imgStores) { List sRes = listByTemplateStoreDownloadStatus(templateId, store.getId(), status); @@ -423,7 +425,8 @@ public TemplateDataStoreVO findByTemplateZone(long templateId, Long zoneId, Data // get all elgible image stores List imgStores = null; if (role == DataStoreRole.Image) { - imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); + //imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); + imgStores = _storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); } else if (role == DataStoreRole.ImageCache) { imgStores = _storeMgr.getImageCacheStores(new ZoneScope(zoneId)); } @@ -441,7 +444,8 @@ public TemplateDataStoreVO findByTemplateZone(long templateId, Long zoneId, Data @Override public TemplateDataStoreVO findByTemplateZoneReady(long templateId, Long zoneId) { List imgStores = null; - imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); + //imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); + imgStores = _storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); if (imgStores != null) { Collections.shuffle(imgStores); for (DataStore store : imgStores) { diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java index 690a1124402d..6494739204f1 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -20,9 +20,6 @@ import javax.inject.Inject; -import 
com.cloud.storage.MigrationOptions; -import org.apache.log4j.Logger; - import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; @@ -33,6 +30,7 @@ import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; import org.apache.cloudstack.storage.to.VolumeObjectTO; +import org.apache.log4j.Logger; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.DownloadAnswer; @@ -42,6 +40,7 @@ import com.cloud.offering.DiskOffering.DiskCacheMode; import com.cloud.storage.DataStoreRole; import com.cloud.storage.DiskOfferingVO; +import com.cloud.storage.MigrationOptions; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.ProvisioningType; import com.cloud.storage.Volume; @@ -392,7 +391,8 @@ public void processEvent(ObjectInDataStoreStateMachine.Event event) { if (event == ObjectInDataStoreStateMachine.Event.CreateOnlyRequested) { volEvent = Volume.Event.UploadRequested; } else if (event == ObjectInDataStoreStateMachine.Event.MigrationRequested) { - volEvent = Volume.Event.CopyRequested; + //volEvent = Volume.Event.CopyRequested; + volEvent = Volume.Event.MigrationRequested; } } else { if (event == ObjectInDataStoreStateMachine.Event.CreateRequested || event == ObjectInDataStoreStateMachine.Event.CreateOnlyRequested) { diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index 92c8a93f515b..284cd5c31510 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -1359,7 +1359,6 @@ 
protected Void copyVolumeFromImageToPrimaryCallback(AsyncCallbackDispatcher sc = asyncJobTypeSearch.create(); sc.setParameters("status", JobInfo.Status.IN_PROGRESS); sc.setParameters("job_cmd", (Object[])cmds); - sc.setParameters("job_info", "%" + havingInfo + "%"); + if (havingInfo != null) { + sc.setParameters("job_info", "%" + havingInfo + "%"); + } List results = customSearch(sc, null); return results.get(0); } diff --git a/server/pom.xml b/server/pom.xml index deadd28a1dc3..730ef0c0c6b5 100644 --- a/server/pom.xml +++ b/server/pom.xml @@ -89,6 +89,11 @@ commons-codec commons-codec + + org.apache.commons + commons-math3 + 3.6.1 + org.apache.cloudstack cloud-utils diff --git a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java index 267c81ae84e4..70a920f5a161 100644 --- a/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java +++ b/server/src/main/java/com/cloud/api/query/QueryManagerImpl.java @@ -119,6 +119,7 @@ import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.query.QueryService; import org.apache.cloudstack.resourcedetail.dao.DiskOfferingDetailsDao; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.commons.collections.CollectionUtils; import org.apache.log4j.Logger; import org.springframework.stereotype.Component; @@ -411,6 +412,9 @@ public class QueryManagerImpl extends MutualExclusiveIdsManagerBase implements Q @Inject private RouterHealthCheckResultDao routerHealthCheckResultDao; + @Inject + private TemplateDataStoreDao templateDataStoreDao; + /* * (non-Javadoc) * @@ -2464,6 +2468,7 @@ private Pair, Integer> searchForImageStoresInternal(ListI Object keyword = cmd.getKeyword(); Long startIndex = cmd.getStartIndex(); Long pageSize = cmd.getPageSizeVal(); + Boolean readonly = cmd.getReadonly(); Filter searchFilter = new Filter(ImageStoreJoinVO.class, "id", Boolean.TRUE, startIndex, 
pageSize); @@ -2476,6 +2481,7 @@ private Pair, Integer> searchForImageStoresInternal(ListI sb.and("protocol", sb.entity().getProtocol(), SearchCriteria.Op.EQ); sb.and("provider", sb.entity().getProviderName(), SearchCriteria.Op.EQ); sb.and("role", sb.entity().getRole(), SearchCriteria.Op.EQ); + sb.and("readonly", sb.entity().isReadonly(), Op.EQ); SearchCriteria sc = sb.create(); sc.setParameters("role", DataStoreRole.Image); @@ -2504,7 +2510,9 @@ private Pair, Integer> searchForImageStoresInternal(ListI if (protocol != null) { sc.setParameters("protocol", protocol); } - + if (readonly != null) { + sc.setParameters("readonly", readonly); + } // search Store details by ids Pair, Integer> uniqueStorePair = _imageStoreJoinDao.searchAndCount(sc, searchFilter); Integer count = uniqueStorePair.second(); diff --git a/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java b/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java index ced81a6e06c4..f01c2c42e1b5 100644 --- a/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java +++ b/server/src/main/java/com/cloud/api/query/ViewResponseHelper.java @@ -26,9 +26,6 @@ import java.util.List; import java.util.Map; -import com.cloud.configuration.Resource; -import com.cloud.domain.Domain; -import org.apache.log4j.Logger; import org.apache.cloudstack.affinity.AffinityGroupResponse; import org.apache.cloudstack.api.ApiConstants.DomainDetails; import org.apache.cloudstack.api.ApiConstants.HostDetails; @@ -59,6 +56,7 @@ import org.apache.cloudstack.api.response.VolumeResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.context.CallContext; +import org.apache.log4j.Logger; import com.cloud.api.ApiDBUtils; import com.cloud.api.query.vo.AccountJoinVO; @@ -84,8 +82,10 @@ import com.cloud.api.query.vo.UserAccountJoinVO; import com.cloud.api.query.vo.UserVmJoinVO; import com.cloud.api.query.vo.VolumeJoinVO; -import com.cloud.storage.StoragePoolTagVO; +import 
com.cloud.configuration.Resource; +import com.cloud.domain.Domain; import com.cloud.storage.Storage.ImageFormat; +import com.cloud.storage.StoragePoolTagVO; import com.cloud.storage.VolumeStats; import com.cloud.user.Account; diff --git a/server/src/main/java/com/cloud/api/query/dao/ImageStoreJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/ImageStoreJoinDaoImpl.java index 7734489dd676..96ba6ec73cd1 100644 --- a/server/src/main/java/com/cloud/api/query/dao/ImageStoreJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/ImageStoreJoinDaoImpl.java @@ -65,6 +65,7 @@ public ImageStoreResponse newImageStoreResponse(ImageStoreJoinVO ids) { osResponse.setName(ids.getName()); osResponse.setProviderName(ids.getProviderName()); osResponse.setProtocol(ids.getProtocol()); + osResponse.setReadonly(ids.isReadonly()); String url = ids.getUrl(); //if store is type cifs, remove the password if(ids.getProtocol().equals("cifs".toString())) { diff --git a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java index 54686f73df2a..924d1edc387b 100644 --- a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java @@ -25,10 +25,6 @@ import javax.inject.Inject; -import org.apache.cloudstack.utils.security.DigestHelper; -import org.apache.log4j.Logger; -import org.springframework.stereotype.Component; - import org.apache.cloudstack.api.ResponseObject.ResponseView; import org.apache.cloudstack.api.response.ChildTemplateResponse; import org.apache.cloudstack.api.response.TemplateResponse; @@ -36,6 +32,12 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateState; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import 
org.apache.cloudstack.storage.datastore.db.ImageStoreDao; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; +import org.apache.cloudstack.utils.security.DigestHelper; +import org.apache.log4j.Logger; +import org.springframework.stereotype.Component; import com.cloud.api.ApiDBUtils; import com.cloud.api.ApiResponseHelper; @@ -68,6 +70,10 @@ public class TemplateJoinDaoImpl extends GenericDaoBaseWithTagInformation tmpltIdPairSearch; @@ -131,7 +137,18 @@ private String getTemplateStatus(TemplateJoinVO template) { @Override public TemplateResponse newTemplateResponse(ResponseView view, TemplateJoinVO template) { + List templatesInStore = _templateStoreDao.listByTemplate(template.getId()); + List dowloadProgressDetails = new ArrayList(); + HashMap downloadDetailInImageStores = null; + for (TemplateDataStoreVO templateInStore : templatesInStore) { + downloadDetailInImageStores = new HashMap<>(); + downloadDetailInImageStores.put("datastore", dataStoreDao.findById(templateInStore.getDataStoreId()).getName()); + downloadDetailInImageStores.put("dowloadPercent", Integer.toString(templateInStore.getDownloadPercent())); + downloadDetailInImageStores.put("dowloadState", (templateInStore.getDownloadState() != null ? 
templateInStore.getDownloadState().toString() : "")); + dowloadProgressDetails.add(downloadDetailInImageStores); + } TemplateResponse templateResponse = new TemplateResponse(); + templateResponse.setDownloadProgress(dowloadProgressDetails); templateResponse.setId(template.getUuid()); templateResponse.setName(template.getName()); templateResponse.setDisplayText(template.getDisplayText()); diff --git a/server/src/main/java/com/cloud/api/query/vo/ImageStoreJoinVO.java b/server/src/main/java/com/cloud/api/query/vo/ImageStoreJoinVO.java index 244f89ec3c2b..bcc73cb47bf5 100644 --- a/server/src/main/java/com/cloud/api/query/vo/ImageStoreJoinVO.java +++ b/server/src/main/java/com/cloud/api/query/vo/ImageStoreJoinVO.java @@ -67,6 +67,9 @@ public class ImageStoreJoinVO extends BaseViewVO implements InternalIdentity, Id @Enumerated(value = EnumType.STRING) private DataStoreRole role; + @Column(name = "readonly") + private boolean readonly = false; + @Column(name = "data_center_id") private long zoneId; @@ -128,4 +131,8 @@ public DataStoreRole getRole() { public Date getRemoved() { return removed; } + + public boolean isReadonly() { + return readonly; + } } diff --git a/server/src/main/java/com/cloud/configuration/Config.java b/server/src/main/java/com/cloud/configuration/Config.java index 3daf720138c1..b1ec5bd654b0 100644 --- a/server/src/main/java/com/cloud/configuration/Config.java +++ b/server/src/main/java/com/cloud/configuration/Config.java @@ -16,6 +16,15 @@ // under the License. 
package com.cloud.configuration; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.StringTokenizer; + +import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; +import org.apache.cloudstack.framework.config.ConfigKey; + import com.cloud.agent.AgentManager; import com.cloud.consoleproxy.ConsoleProxyManager; import com.cloud.ha.HighAvailabilityManager; @@ -29,14 +38,6 @@ import com.cloud.template.TemplateManager; import com.cloud.vm.UserVmManager; import com.cloud.vm.snapshot.VMSnapshotManager; -import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; -import org.apache.cloudstack.framework.config.ConfigKey; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.StringTokenizer; /** * @deprecated use the more dynamic ConfigKey @@ -1808,7 +1809,10 @@ public enum Config { // StatsCollector StatsOutPutGraphiteHost("Advanced", ManagementServer.class, String.class, "stats.output.uri", "", "URI to additionally send StatsCollector statistics to", null), - SSVMPSK("Hidden", ManagementServer.class, String.class, "upload.post.secret.key", "", "PSK with SSVM", null); + SSVMPSK("Hidden", ManagementServer.class, String.class, "upload.post.secret.key", "", "PSK with SSVM", null), + + SecStorageCopyCmdMaxSessions( + "Advanced", AgentManager.class, Integer.class, "secstorage.cpy.cmd.max.sessions","2","The max number of concurrent copy command execution sessions that an SSVM can handle",null); private final String _category; private final Class _componentClass; diff --git a/server/src/main/java/com/cloud/server/ManagementServerImpl.java b/server/src/main/java/com/cloud/server/ManagementServerImpl.java index a3e9bb655940..cfb6860896ad 100644 --- 
a/server/src/main/java/com/cloud/server/ManagementServerImpl.java +++ b/server/src/main/java/com/cloud/server/ManagementServerImpl.java @@ -195,8 +195,10 @@ import org.apache.cloudstack.api.command.admin.storage.ListStoragePoolsCmd; import org.apache.cloudstack.api.command.admin.storage.ListStorageProvidersCmd; import org.apache.cloudstack.api.command.admin.storage.ListStorageTagsCmd; +import org.apache.cloudstack.api.command.admin.storage.MigrateSecondaryStorageDataCmd; import org.apache.cloudstack.api.command.admin.storage.PreparePrimaryStorageForMaintenanceCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateCloudToUseObjectStoreCmd; +import org.apache.cloudstack.api.command.admin.storage.UpdateImageStoreCmd; import org.apache.cloudstack.api.command.admin.storage.UpdateStoragePoolCmd; import org.apache.cloudstack.api.command.admin.swift.AddSwiftCmd; import org.apache.cloudstack.api.command.admin.swift.ListSwiftsCmd; @@ -2753,6 +2755,7 @@ public List> getCommands() { cmdList.add(FindStoragePoolsForMigrationCmd.class); cmdList.add(PreparePrimaryStorageForMaintenanceCmd.class); cmdList.add(UpdateStoragePoolCmd.class); + cmdList.add(UpdateImageStoreCmd.class); cmdList.add(DestroySystemVmCmd.class); cmdList.add(ListSystemVMsCmd.class); cmdList.add(MigrateSystemVMCmd.class); @@ -3144,6 +3147,7 @@ public List> getCommands() { cmdList.add(ListTemplateOVFProperties.class); cmdList.add(GetRouterHealthCheckResultsCmd.class); cmdList.add(StartRollingMaintenanceCmd.class); + cmdList.add(MigrateSecondaryStorageDataCmd.class); // Out-of-band management APIs for admins cmdList.add(EnableOutOfBandManagementForHostCmd.class); diff --git a/server/src/main/java/com/cloud/storage/ImageStoreDetailsUtil.java b/server/src/main/java/com/cloud/storage/ImageStoreDetailsUtil.java index 3e27ce6ab490..4ed7962db97f 100755 --- a/server/src/main/java/com/cloud/storage/ImageStoreDetailsUtil.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreDetailsUtil.java @@ -30,7 +30,6 
@@ import com.google.common.base.Preconditions; public class ImageStoreDetailsUtil { - @Inject protected ImageStoreDao imageStoreDao; @Inject diff --git a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java new file mode 100644 index 000000000000..001590258a07 --- /dev/null +++ b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java @@ -0,0 +1,123 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package com.cloud.storage; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import javax.inject.Inject; +import javax.naming.ConfigurationException; + +import org.apache.cloudstack.api.command.admin.storage.MigrateSecondaryStorageDataCmd; +import org.apache.cloudstack.api.response.MigrationResponse; +import org.apache.cloudstack.engine.orchestration.service.StorageOrchestrationService; +import org.apache.cloudstack.framework.config.ConfigKey; +import org.apache.cloudstack.framework.jobs.AsyncJobManager; +import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; +import org.apache.log4j.Logger; + +import com.cloud.utils.component.ManagerBase; +import com.cloud.utils.exception.CloudRuntimeException; + +public class ImageStoreServiceImpl extends ManagerBase implements ImageStoreService { + + private static final Logger s_logger = Logger.getLogger(ImageStoreServiceImpl.class); + @Inject + ImageStoreDao imageStoreDao; + @Inject + private AsyncJobManager jobMgr; + @Inject + private StorageOrchestrationService stgService; + + ConfigKey ImageStoreImbalanceThreshold = new ConfigKey<>("Advanced", Double.class, + "image.store.imbalance.threshold", + "0.5", + "The storage imbalance threshold that is compared with the standard deviation percentage for a storage utilization metric. 
" + + "The value is a percentage in decimal format.", + true, ConfigKey.Scope.Global); + + + public Integer numConcurrentCopyTasksPerSSVM = null; + + + + @Override + public boolean configure(String name, Map params) throws ConfigurationException { + return true; + } + + @Override + public MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd) { + Long srcImgStoreId = cmd.getId(); + ImageStoreVO srcImageVO = imageStoreDao.findById(srcImgStoreId); + List destImgStoreIds = cmd.getMigrateTo(); + String migrationType = cmd.getMigrationType(); + + String message = null; + + if (srcImageVO == null) { + throw new CloudRuntimeException("Cannot find secondary storage with id: " + srcImgStoreId); + } + + if (srcImageVO.getRole() != DataStoreRole.Image) { + throw new CloudRuntimeException("Secondary storage is not of Image Role"); + } + + if (destImgStoreIds.contains(srcImgStoreId)) { + s_logger.debug("One of the destination stores is the same as the source image store ... Ignoring it..."); + destImgStoreIds.remove(srcImgStoreId); + } + + // Validate all the Ids correspond to valid Image stores + List destDatastores = new ArrayList<>(); + for (Long id : destImgStoreIds) { + if (imageStoreDao.findById(id) == null) { + s_logger.warn("Secondary storage with id: " + id + "is not found. Skipping it..."); + continue; + } + if (imageStoreDao.findById(id).isReadonly()) { + s_logger.warn("Secondary storage: "+ id + " cannot be considered for migration as has read-only permission, Skipping it..."); + continue; + } + destDatastores.add(id); + } + + if (destDatastores.size() < 1) { + throw new CloudRuntimeException("Invalid destination image store(s) provided. 
Terminating Migration of data"); + } + + if (isMigrateJobRunning()){ + message = "A migrate job is in progress, please try again later..."; + return new MigrationResponse(message, migrationType, false); + } + + return stgService.migrateData(srcImgStoreId, destDatastores, migrationType, cmd.getTemp()); + } + + + // Ensures that only one migrate job may occur at a time, in order to reduce load + private boolean isMigrateJobRunning() { + long count = jobMgr.countPendingJobs(null, MigrateSecondaryStorageDataCmd.class.getName()); + if (count > 1) { + return true; + } + return false; + } +} diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 79343ab4725f..d11a2fddea7a 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -1358,7 +1358,7 @@ public void cleanupSecondaryStorage(boolean recurring) { // so here we don't need to issue DeleteCommand to resource anymore, only need to remove db entry. 
try { // Cleanup templates in template_store_ref - List imageStores = _dataStoreMgr.getImageStoresByScope(new ZoneScope(null)); + List imageStores = _dataStoreMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(null)); for (DataStore store : imageStores) { try { long storeId = store.getId(); @@ -2144,6 +2144,18 @@ public ImageStore migrateToObjectStore(String name, String url, String providerN return discoverImageStore(name, url, providerName, null, details); } + @Override + public ImageStore updateImageStoreStatus(Long id, Boolean readonly) { + // Input validation + ImageStoreVO imageStoreVO = _imageStoreDao.findById(id); + if (imageStoreVO == null) { + throw new IllegalArgumentException("Unable to find image store with ID: " + id); + } + imageStoreVO.setReadonly(readonly); + _imageStoreDao.update(id, imageStoreVO); + return imageStoreVO; + } + private void duplicateCacheStoreRecordsToRegionStore(long storeId) { _templateStoreDao.duplicateCacheRecordsOnRegionStore(storeId); _snapshotStoreDao.duplicateCacheRecordsOnRegionStore(storeId); diff --git a/server/src/main/java/com/cloud/storage/download/DownloadListener.java b/server/src/main/java/com/cloud/storage/download/DownloadListener.java index 51f9d42980cc..25dffb3e8afa 100644 --- a/server/src/main/java/com/cloud/storage/download/DownloadListener.java +++ b/server/src/main/java/com/cloud/storage/download/DownloadListener.java @@ -297,7 +297,7 @@ else if ( cmd instanceof StartupStorageCommand) { }*/ else if (cmd instanceof StartupSecondaryStorageCommand) { try{ - List imageStores = _storeMgr.getImageStoresByScope(new ZoneScope(agent.getDataCenterId())); + List imageStores = _storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(agent.getDataCenterId())); for (DataStore store : imageStores) { _volumeSrv.handleVolumeSync(store); _imageSrv.handleTemplateSync(store); diff --git a/server/src/main/java/com/cloud/storage/secondary/SecondaryStorageVmManager.java 
b/server/src/main/java/com/cloud/storage/secondary/SecondaryStorageVmManager.java index 5c50d46f4dda..b57b44303209 100644 --- a/server/src/main/java/com/cloud/storage/secondary/SecondaryStorageVmManager.java +++ b/server/src/main/java/com/cloud/storage/secondary/SecondaryStorageVmManager.java @@ -23,6 +23,7 @@ import com.cloud.host.HostVO; import com.cloud.utils.Pair; import com.cloud.utils.component.Manager; +import com.cloud.vm.SecondaryStorageVm; import com.cloud.vm.SecondaryStorageVmVO; public interface SecondaryStorageVmManager extends Manager { @@ -31,6 +32,7 @@ public interface SecondaryStorageVmManager extends Manager { public static final int DEFAULT_SS_VM_CPUMHZ = 500; // 500 MHz public static final int DEFAULT_SS_VM_MTUSIZE = 1500; public static final int DEFAULT_SS_VM_CAPACITY = 50; // max command execution session per SSVM + public static final int DEFAULT_MIGRATE_SS_VM_CAPACITY = 2; // number of concurrent migrate operations to happen per SSVM public static final int DEFAULT_STANDBY_CAPACITY = 10; // standy capacity to reserve per zone public static final String ALERT_SUBJECT = "secondarystoragevm-alert"; @@ -56,4 +58,6 @@ public interface SecondaryStorageVmManager extends Manager { public List listUpAndConnectingSecondaryStorageVmHost(Long dcId); public HostVO pickSsvmHost(HostVO ssHost); + + void allocCapacity(long dataCenterId, SecondaryStorageVm.Role role); } diff --git a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java index 85c4a77774e8..f9b69fe5741e 100644 --- a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java +++ b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java @@ -16,10 +16,6 @@ // under the License. 
package com.cloud.template; -import com.cloud.agent.api.Answer; -import com.cloud.host.HostVO; -import com.cloud.hypervisor.Hypervisor; -import com.cloud.resource.ResourceManager; import java.util.Collections; import java.util.HashSet; import java.util.LinkedList; @@ -29,26 +25,18 @@ import javax.inject.Inject; -import com.cloud.configuration.Config; -import com.cloud.utils.db.Transaction; -import com.cloud.utils.db.TransactionCallback; -import com.cloud.utils.db.TransactionStatus; import org.apache.cloudstack.agent.directdownload.CheckUrlAnswer; import org.apache.cloudstack.agent.directdownload.CheckUrlCommand; -import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd; -import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd; -import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; -import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; -import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand; -import org.apache.cloudstack.utils.security.DigestHelper; -import org.apache.log4j.Logger; -import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.api.command.user.iso.DeleteIsoCmd; +import org.apache.cloudstack.api.command.user.iso.GetUploadParamsForIsoCmd; import org.apache.cloudstack.api.command.user.iso.RegisterIsoCmd; import org.apache.cloudstack.api.command.user.template.DeleteTemplateCmd; +import org.apache.cloudstack.api.command.user.template.GetUploadParamsForTemplateCmd; import org.apache.cloudstack.api.command.user.template.RegisterTemplateCmd; +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; +import org.apache.cloudstack.engine.subsystem.api.storage.EndPoint; import org.apache.cloudstack.engine.subsystem.api.storage.EndPointSelector; import 
org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; @@ -62,11 +50,17 @@ import org.apache.cloudstack.framework.async.AsyncRpcContext; import org.apache.cloudstack.framework.messagebus.MessageBus; import org.apache.cloudstack.framework.messagebus.PublishScope; +import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity; +import org.apache.cloudstack.utils.security.DigestHelper; +import org.apache.log4j.Logger; import com.cloud.agent.AgentManager; +import com.cloud.agent.api.Answer; import com.cloud.alert.AlertManager; +import com.cloud.configuration.Config; import com.cloud.configuration.Resource.ResourceType; import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.DataCenterDao; @@ -74,11 +68,11 @@ import com.cloud.event.UsageEventUtils; import com.cloud.exception.InvalidParameterValueException; import com.cloud.exception.ResourceAllocationException; +import com.cloud.host.HostVO; +import com.cloud.hypervisor.Hypervisor; import com.cloud.org.Grouping; +import com.cloud.resource.ResourceManager; import com.cloud.server.StatsCollector; -import com.cloud.template.VirtualMachineTemplate.State; -import com.cloud.user.Account; -import com.cloud.utils.Pair; import com.cloud.storage.ScopeType; import com.cloud.storage.Storage.ImageFormat; import com.cloud.storage.Storage.TemplateType; @@ -89,9 +83,15 @@ import com.cloud.storage.dao.VMTemplateDao; import com.cloud.storage.dao.VMTemplateZoneDao; import com.cloud.storage.download.DownloadMonitor; +import com.cloud.template.VirtualMachineTemplate.State; +import com.cloud.user.Account; +import com.cloud.utils.Pair; import com.cloud.utils.UriUtils; import com.cloud.utils.db.DB; import 
com.cloud.utils.db.EntityManager; +import com.cloud.utils.db.Transaction; +import com.cloud.utils.db.TransactionCallback; +import com.cloud.utils.db.TransactionStatus; import com.cloud.utils.exception.CloudRuntimeException; public class HypervisorTemplateAdapter extends TemplateAdapterBase { @@ -241,7 +241,7 @@ public VMTemplateVO create(TemplateProfile profile) { private void createTemplateWithinZone(Long zId, TemplateProfile profile, VMTemplateVO template) { // find all eligible image stores for this zone scope - List imageStores = storeMgr.getImageStoresByScope(new ZoneScope(zId)); + List imageStores = storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(zId)); if (imageStores == null || imageStores.size() == 0) { throw new CloudRuntimeException("Unable to find image store to download template " + profile.getTemplate()); } @@ -308,7 +308,7 @@ public List doInTransaction(TransactionStatus zoneId = profile.getZoneIdList().get(0); // find all eligible image stores for this zone scope - List imageStores = storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); + List imageStores = storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); if (imageStores == null || imageStores.size() == 0) { throw new CloudRuntimeException("Unable to find image store to download template " + profile.getTemplate()); } diff --git a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java index 749f272bf361..86eabe44ce20 100755 --- a/server/src/main/java/com/cloud/template/TemplateManagerImpl.java +++ b/server/src/main/java/com/cloud/template/TemplateManagerImpl.java @@ -754,7 +754,7 @@ public boolean copy(long userId, VMTemplateVO template, DataStore srcSecStore, D long tmpltId = template.getId(); long dstZoneId = dstZone.getId(); // find all eligible image stores for the destination zone - List dstSecStores = _dataStoreMgr.getImageStoresByScope(new ZoneScope(dstZoneId)); + List 
dstSecStores = _dataStoreMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(dstZoneId)); if (dstSecStores == null || dstSecStores.isEmpty()) { throw new StorageUnavailableException("Destination zone is not ready, no image store associated", DataCenter.class, dstZone.getId()); } diff --git a/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java b/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java index 49ad2159698f..0184a44ff390 100644 --- a/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java +++ b/server/src/main/java/org/apache/cloudstack/diagnostics/DiagnosticsServiceImpl.java @@ -372,7 +372,7 @@ private VMInstanceVO getSecondaryStorageVmInZone(Long zoneId) { * @return a valid secondary storage with less than DiskQuotaPercentageThreshold set by global config */ private DataStore getImageStore(Long zoneId) { - List stores = storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); + List stores = storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); if (CollectionUtils.isEmpty(stores)) { throw new CloudRuntimeException("No Secondary storage found in Zone with Id: " + zoneId); } diff --git a/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml b/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml index 1c90a97a70f6..5449664e9d0a 100644 --- a/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml +++ b/server/src/main/resources/META-INF/cloudstack/core/spring-server-core-managers-context.xml @@ -260,6 +260,8 @@ + + migrationSSVMS = new ArrayList<>(); @Inject SecondaryStorageVmDao _secStorageVmDao; @@ -63,6 +71,7 @@ public class PremiumSecondaryStorageManagerImpl extends SecondaryStorageManagerI @Inject ResourceManager _resourceMgr; protected SearchBuilder activeCommandSearch; + protected SearchBuilder activeCopyCommandSearch; protected 
SearchBuilder hostSearch; @Override @@ -83,8 +92,15 @@ public boolean configure(String name, Map params) throws Configu activeCommandSearch.and("created", activeCommandSearch.entity().getCreated(), Op.GTEQ); activeCommandSearch.join("hostSearch", hostSearch, activeCommandSearch.entity().getInstanceId(), hostSearch.entity().getId(), JoinType.INNER); + activeCopyCommandSearch = _cmdExecLogDao.createSearchBuilder(); + activeCopyCommandSearch.and("created", activeCopyCommandSearch.entity().getCreated(), Op.GTEQ); + activeCopyCommandSearch.and("command_name", activeCopyCommandSearch.entity().getCommandName(), Op.EQ); + activeCopyCommandSearch.join("hostSearch", hostSearch, activeCopyCommandSearch.entity().getHostId(), hostSearch.entity().getId(), JoinType.INNER); + + hostSearch.done(); activeCommandSearch.done(); + activeCopyCommandSearch.done(); return true; } @@ -96,7 +112,6 @@ public Pair scanPool(Long pool) { } Date cutTime = new Date(DateUtil.currentGMTTime().getTime() - _maxExecutionTimeMs); - _cmdExecLogDao.expungeExpiredRecords(cutTime); boolean suspendAutoLoading = !reserveStandbyCapacity(); @@ -136,12 +151,45 @@ public Pair scanPool(Long pool) { alreadyRunning = _secStorageVmDao.getSecStorageVmListInStates(null, dataCenterId, State.Running, State.Migrating, State.Starting); - List activeCmds = findActiveCommands(dataCenterId, cutTime); + List activeCmds = findActiveCommands(dataCenterId, cutTime, null); + + nMaxExecutionMinutes = 240; + + List copyCmdsInPipeline = findAllActiveCopyCommands(dataCenterId, cutTime); + Integer hostsCount = _hostDao.countAllByType(Host.Type.Routing); + Integer maxSsvms = (hostsCount < MaxNumberOfSsvmsForMigration.value()) ? 
hostsCount : MaxNumberOfSsvmsForMigration.value(); + + currentTime = DateUtil.currentGMTTime().getTime(); + s_logger.debug("PEARL - current time: "+ currentTime); + s_logger.debug("PEARL - next spawn time: "+nextSpawnTime); + if (alreadyRunning.size() * _capacityPerSSVM - activeCmds.size() < _standbyCapacity) { s_logger.info("secondary storage command execution standby capactiy low (running VMs: " + alreadyRunning.size() + ", active cmds: " + activeCmds.size() + "), starting a new one"); return new Pair(AfterScanAction.expand, SecondaryStorageVm.Role.commandExecutor); } + // Scale the number of SSVMs if the number of Copy operations is greater than the number of SSVMs running and the copy operation has been in pipeline for + // more than half of the total time allocated for secondary storage operations + else if (!copyCmdsInPipeline.isEmpty() && copyCmdsInPipeline.size() >= alreadyRunning.size() && + (((currentTime - copyCmdsInPipeline.get(alreadyRunning.size() - 1).getCreated().getTime()) /1000 > nMaxExecutionMinutes/2)) && + (currentTime > nextSpawnTime) && alreadyRunning.size() <= maxSsvms) { + nextSpawnTime = currentTime + nMaxExecutionMinutes * 1000; + s_logger.debug("PEARL - scale SSVM!!!"); + return new Pair(AfterScanAction.expand, SecondaryStorageVm.Role.templateProcessor); + } + + // Scale down the number of SSVMs if the load on then has reduced + if ((copyCmdsInPipeline.size() < alreadyRunning.size() && alreadyRunning.size() * _capacityPerSSVM - activeCmds.size() > _standbyCapacity) && alreadyRunning.size() > 1) { + Collections.reverse(alreadyRunning); + for(SecondaryStorageVmVO vm : alreadyRunning) { + long count = copyCmdsInPipeline.stream().map(cmd -> cmd.getInstanceId() == vm.getId()).count(); + count += activeCmds.stream().map(cmd -> cmd.getInstanceId() == vm.getId()).count(); + if (count == 0) { + destroySecStorageVm(vm.getId()); + break; + } + } + } } return new Pair(AfterScanAction.nop, null); @@ -163,22 +211,30 @@ public Pair 
assignSecStorageVm(long zoneId, Comman return null; } - private List findActiveCommands(long dcId, Date cutTime) { + private List findActiveCommands(long dcId, Date cutTime, String cmdName) { SearchCriteria sc = activeCommandSearch.create(); - sc.setParameters("created", cutTime); sc.setJoinParameters("hostSearch", "dc", dcId); sc.setJoinParameters("hostSearch", "status", Status.Up); - + List result = _cmdExecLogDao.search(sc, null); return _cmdExecLogDao.search(sc, null); } + private List findAllActiveCopyCommands(long dcId, Date cutTime) { + SearchCriteria sc = activeCopyCommandSearch.create(); + sc.setParameters("created", cutTime); + sc.setParameters("command_name", "CopyCommand"); + sc.setJoinParameters("hostSearch", "dc", dcId); + sc.setJoinParameters("hostSearch", "status", Status.Up); + List result = _cmdExecLogDao.search(sc, null); + return result; + } + private boolean reserveStandbyCapacity() { String value = _configDao.getValue(Config.SystemVMAutoReserveCapacity.key()); if (value != null && value.equalsIgnoreCase("true")) { return true; } - return false; } -} +} \ No newline at end of file diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java index a1a3873bf884..dbb9611d7007 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/SecondaryStorageManagerImpl.java @@ -266,6 +266,9 @@ public class SecondaryStorageManagerImpl extends ManagerBase implements Secondar static final ConfigKey NTPServerConfig = new ConfigKey(String.class, "ntp.server.list", "Advanced", null, "Comma separated list of NTP servers to configure in Secondary storage VM", false, ConfigKey.Scope.Global, 
null); + static final ConfigKey MaxNumberOfSsvmsForMigration = new ConfigKey("Advanced", Integer.class, "max.ssvm.count", "5", + "Number of additional SSVMs to handle migration of data objects concurrently", true, ConfigKey.Scope.Global); + public SecondaryStorageManagerImpl() { } @@ -720,7 +723,7 @@ public SecondaryStorageVmVO assignSecStorageVmFromStoppedPool(long dataCenterId, return null; } - private void allocCapacity(long dataCenterId, SecondaryStorageVm.Role role) { + public void allocCapacity(long dataCenterId, SecondaryStorageVm.Role role) { if (s_logger.isTraceEnabled()) { s_logger.trace("Allocate secondary storage vm standby capacity for data center : " + dataCenterId); } @@ -822,7 +825,7 @@ public boolean isZoneReady(Map zoneHostInfoMap, long dataCen return false; } - List stores = _dataStoreMgr.getImageStoresByScope(new ZoneScope(dataCenterId)); + List stores = _dataStoreMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(dataCenterId)); if (stores.size() < 1) { s_logger.debug("No image store added in zone " + dataCenterId + ", wait until it is ready to launch secondary storage vm"); return false; @@ -1374,7 +1377,7 @@ public Pair scanPool(Long pool) { _secStorageVmDao.getSecStorageVmListInStates(SecondaryStorageVm.Role.templateProcessor, dataCenterId, State.Running, State.Migrating, State.Starting, State.Stopped, State.Stopping); int vmSize = (ssVms == null) ? 0 : ssVms.size(); - List ssStores = _dataStoreMgr.getImageStoresByScope(new ZoneScope(dataCenterId)); + List ssStores = _dataStoreMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(dataCenterId)); int storeSize = (ssStores == null) ? 
0 : ssStores.size(); if (storeSize > vmSize) { s_logger.info("No secondary storage vms found in datacenter id=" + dataCenterId + ", starting a new one"); @@ -1512,7 +1515,7 @@ public String getConfigComponentName() { @Override public ConfigKey[] getConfigKeys() { - return new ConfigKey[] {NTPServerConfig}; + return new ConfigKey[] {NTPServerConfig, MaxNumberOfSsvmsForMigration}; } } diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index ab98a812580f..9691f610f46e 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -54,6 +54,7 @@ import javax.naming.ConfigurationException; import org.apache.cloudstack.framework.security.keystore.KeystoreManager; +import org.apache.cloudstack.storage.NfsMountManagerImpl.PathParser; import org.apache.cloudstack.storage.command.CopyCmdAnswer; import org.apache.cloudstack.storage.command.CopyCommand; import org.apache.cloudstack.storage.command.DeleteCommand; @@ -67,7 +68,6 @@ import org.apache.cloudstack.storage.configdrive.ConfigDriveBuilder; import org.apache.cloudstack.storage.template.DownloadManager; import org.apache.cloudstack.storage.template.DownloadManagerImpl; -import org.apache.cloudstack.storage.NfsMountManagerImpl.PathParser; import org.apache.cloudstack.storage.template.UploadEntity; import org.apache.cloudstack.storage.template.UploadManager; import org.apache.cloudstack.storage.template.UploadManagerImpl; @@ -1060,6 +1060,10 @@ protected Answer execute(CopyCommand cmd) { DataStoreTO srcDataStore = srcData.getDataStore(); DataStoreTO destDataStore = destData.getDataStore(); + if (DataStoreRole.Image == 
srcDataStore.getRole() && DataStoreRole.Image == destDataStore.getRole()) { + return copyFromNfsToNfs(cmd); + } + if (srcData.getObjectType() == DataObjectType.SNAPSHOT && destData.getObjectType() == DataObjectType.TEMPLATE) { return createTemplateFromSnapshot(cmd); } @@ -1264,7 +1268,6 @@ protected long getVirtualSize(File file, ImageFormat format) { } protected File findFile(String path) { - File srcFile = _storage.getFile(path); if (!srcFile.exists()) { srcFile = _storage.getFile(path + ".qcow2"); @@ -1285,6 +1288,95 @@ protected File findFile(String path) { return srcFile; } + protected Answer copyFromNfsToNfs(CopyCommand cmd) { + s_logger.info("PEARL - copying from nfs to nfs"); + try { + long randSleep = (long) (Math.random() * (((600 - 300) + 1) + 300 * 1000)); + Thread.sleep(randSleep); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + } + final DataTO srcData = cmd.getSrcTO(); + final DataTO destData = cmd.getDestTO(); + DataStoreTO srcDataStore = srcData.getDataStore(); + NfsTO srcStore = (NfsTO)srcDataStore; + DataStoreTO destDataStore = destData.getDataStore(); + final NfsTO destStore = (NfsTO) destDataStore; + try { + s_logger.info("PEARL - src store url = "+ srcStore.getUrl()); + s_logger.info("PEARL - dest store url = "+ destStore.getUrl()); + File srcFile = new File(getDir(srcStore.getUrl(), _nfsVersion), srcData.getPath()); + s_logger.info("PEARL - src file = "+ srcFile.getPath() + " src filename: "+ srcFile.getName()); + File destFile = new File(getDir(destStore.getUrl(), _nfsVersion), destData.getPath()); + s_logger.info("PEARL - dest file = "+destFile.getPath()+ " dest filename = "+destFile.getName()); + ImageFormat format = getTemplateFormat(srcFile.getName()); + s_logger.info("PEARL - file format = "+format); + + if (srcFile == null) { + return new CopyCmdAnswer("Can't find src file:" + srcFile); + } + + if (srcData instanceof TemplateObjectTO || srcData instanceof VolumeObjectTO) { + File srcDir = null; + if 
(srcFile.isFile()) { + srcDir = new File(srcFile.getParent()); + } + File destDir = null; + if (destFile.isFile()) { + destDir = new File(destFile.getParent()); + } + + s_logger.info("PEARL - src dir == " + srcDir); + s_logger.info("PEARL - dest dir == " + destDir); + try { + FileUtils.copyDirectory((srcDir == null ? srcFile : srcDir), (destDir == null? destFile : destDir)); + //FileUtils.copyFile(srcFile, destFile); + } catch (IOException e) { + String msg = "PEARL - Failed to copy file to destination"; + s_logger.info(msg); + return new CopyCmdAnswer(msg); + } + } else { + destFile = new File(destFile, srcFile.getName()); + try { + FileUtils.copyFile(srcFile, destFile); + } catch (IOException e) { + String msg = "PEARL - Failed to copy file to destination"; + s_logger.info(msg); + return new CopyCmdAnswer(msg); + } + } + + DataTO retObj = null; + // TODO: remove it maybe ? + if (destData.getObjectType() == DataObjectType.TEMPLATE) { + TemplateObjectTO newTemplate = new TemplateObjectTO(); + s_logger.info("PEARL - src filename = "+ srcFile.getName() + " dest install path = "+destData.getPath() + File.separator + srcFile.getName()); + newTemplate.setPath(destData.getPath() + File.separator + srcFile.getName()); + newTemplate.setSize(getVirtualSize(srcFile, format)); + s_logger.info("PEARL - file size = "+ getVirtualSize(srcFile, format)); + newTemplate.setPhysicalSize(srcFile.length()); + s_logger.info("PEARL - file phy size = "+ getVirtualSize(srcFile, format)); + newTemplate.setFormat(format); + + retObj = newTemplate; + } else if (destData.getObjectType() == DataObjectType.VOLUME) { + VolumeObjectTO newVol = new VolumeObjectTO(); + newVol.setPath(destData.getPath() + File.separator + srcFile.getName()); + newVol.setSize(srcFile.length()); + retObj = newVol; + } else if (destData.getObjectType() == DataObjectType.SNAPSHOT) { + SnapshotObjectTO newSnapshot = new SnapshotObjectTO(); + newSnapshot.setPath(destData.getPath() + File.separator + destFile.getName()); + 
retObj = newSnapshot; + } + return new CopyCmdAnswer(retObj); + } catch (Exception e) { + s_logger.error("failed to copy file" + srcData.getPath(), e); + return new CopyCmdAnswer("failed to copy file" + srcData.getPath() + e.toString()); + } + } + protected Answer copyFromNfsToS3(CopyCommand cmd) { final DataTO srcData = cmd.getSrcTO(); final DataTO destData = cmd.getDestTO(); @@ -2443,6 +2535,18 @@ protected Answer deleteVolume(final DeleteCommand cmd) { } + private String getDir(String secUrl, Integer nfsVersion) { + try { + URI uri = new URI(secUrl); + String dir = mountUri(uri, nfsVersion); + return _parent + "/" + dir; + } catch (Exception e) { + String msg = "GetRootDir for " + secUrl + " failed due to " + e.toString(); + s_logger.error(msg, e); + throw new CloudRuntimeException(msg); + } + } + @Override synchronized public String getRootDir(String secUrl, Integer nfsVersion) { if (!_inSystemVM) { @@ -2502,6 +2606,7 @@ public PingCommand getCurrentStatus(final long id) { @Override public boolean configure(String name, Map params) throws ConfigurationException { + // TODO: create GS for number of threads for migrate job _eth1ip = (String)params.get("eth1ip"); _eth1mask = (String)params.get("eth1mask"); if (_eth1ip != null) { // can only happen inside service vm diff --git a/tools/apidoc/gen_toc.py b/tools/apidoc/gen_toc.py index ef98b1358987..ca1c44fcd829 100644 --- a/tools/apidoc/gen_toc.py +++ b/tools/apidoc/gen_toc.py @@ -155,6 +155,7 @@ 'createSecondaryStagingStore': 'Image Store', 'deleteSecondaryStagingStore': 'Image Store', 'listSecondaryStagingStores': 'Image Store', + 'updateImageStore': 'Image Store', 'InternalLoadBalancer': 'Internal LB', 'DeploymentPlanners': 'Configuration', 'ObjectStore': 'Image Store', From ac50ee67e3e46bf8734a55770549f648dae8a1b8 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 4 May 2020 10:07:06 +0530 Subject: [PATCH 02/40] Code clean up and logging --- .../main/java/com/cloud/event/EventTypes.java | 10 +- 
.../apache/cloudstack/api/ApiConstants.java | 1 - .../MigrateSecondaryStorageDataCmd.java | 35 ++--- .../admin/storage/UpdateImageStoreCmd.java | 2 - .../service/StorageOrchestrationService.java | 2 +- .../orchestration/StorageOrchestrator.java | 148 +++++------------- .../storage/motion/DataMotionServiceImpl.java | 2 +- .../storage/image/StorageServiceImpl.java | 17 +- .../image/BaseImageStoreDriverImpl.java | 4 +- .../cloud/storage/ImageStoreServiceImpl.java | 2 +- .../PremiumSecondaryStorageManagerImpl.java | 4 +- .../resource/NfsSecondaryStorageResource.java | 24 +-- 12 files changed, 66 insertions(+), 185 deletions(-) diff --git a/api/src/main/java/com/cloud/event/EventTypes.java b/api/src/main/java/com/cloud/event/EventTypes.java index 69044738f7fb..598ede6c0a95 100644 --- a/api/src/main/java/com/cloud/event/EventTypes.java +++ b/api/src/main/java/com/cloud/event/EventTypes.java @@ -22,6 +22,10 @@ import org.apache.cloudstack.acl.Role; import org.apache.cloudstack.acl.RolePermission; import org.apache.cloudstack.annotation.Annotation; +import org.apache.cloudstack.api.response.ClusterResponse; +import org.apache.cloudstack.api.response.HostResponse; +import org.apache.cloudstack.api.response.PodResponse; +import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.config.Configuration; import org.apache.cloudstack.ha.HAConfig; import org.apache.cloudstack.usage.Usage; @@ -76,10 +80,6 @@ import com.cloud.vm.Nic; import com.cloud.vm.NicSecondaryIp; import com.cloud.vm.VirtualMachine; -import org.apache.cloudstack.api.response.ClusterResponse; -import org.apache.cloudstack.api.response.HostResponse; -import org.apache.cloudstack.api.response.PodResponse; -import org.apache.cloudstack.api.response.ZoneResponse; public class EventTypes { @@ -229,7 +229,7 @@ public class EventTypes { public static final String EVENT_TEMPLATE_EXTRACT = "TEMPLATE.EXTRACT"; public static final String EVENT_TEMPLATE_UPLOAD = "TEMPLATE.UPLOAD"; public static 
final String EVENT_TEMPLATE_CLEANUP = "TEMPLATE.CLEANUP"; - public static final String EVENT_TEMPLATE_MIGRATE = "TEMPLATE.MIGRATE"; + public static final String EVENT_FILE_MIGRATE = "FILE.MIGRATE"; // Volume Events public static final String EVENT_VOLUME_CREATE = "VOLUME.CREATE"; diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 7d151d0ceb76..1c2cab71247c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -17,7 +17,6 @@ package org.apache.cloudstack.api; public class ApiConstants { - public static final String TEMP = "temp"; public static final String ACCOUNT = "account"; public static final String ACCOUNTS = "accounts"; public static final String ACCOUNT_TYPE = "accounttype"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java index 7e97903bea54..cf08ceb481ae 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java @@ -39,7 +39,7 @@ import com.cloud.utils.StringUtils; @APICommand(name = MigrateSecondaryStorageDataCmd.APINAME, - description = "migrates templates from one secondary storage to destination image store", + description = "migrates data objects from one secondary storage to destination image store(s)", responseObject = MigrationResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, @@ -58,30 +58,25 @@ public class MigrateSecondaryStorageDataCmd extends BaseAsyncCmd { @Parameter(name = ApiConstants.FROM, type = CommandType.UUID, entityType = ImageStoreResponse.class, - description = "id of the image store from 
where the data is to be migrated") + description = "id of the image store from where the data is to be migrated", + required = true) private Long id; @Parameter(name = ApiConstants.MIGRATE_TO, type = CommandType.LIST, collectionType = CommandType.UUID, entityType = ImageStoreResponse.class, - description = "id of the destination secondary storage pool to which the templates are to be migrated to") + description = "id of the destination secondary storage pool to which the templates are to be migrated to", + required = true) private List migrateTo; @Parameter(name = ApiConstants.MIGRATION_TYPE, type = CommandType.STRING, - description = "partial: if you want data to be distributed evenly among the destination stores, " + - "complete: If you want to migrate the entire data from source image store to the destination store(s)") + description = "Balance: if you want data to be distributed evenly among the destination stores, " + + "Complete: If you want to migrate the entire data from source image store to the destination store(s)", + required = true) private String migrationType; - @Parameter(name = ApiConstants.TEMP, - type = CommandType.LONG, - description = "partial: if you want data to be distributed evenly among the destination stores, " + - "complete: If you want to migrate the entire data from source image store to the destination store(s)") - private Long temp; - - - ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -91,8 +86,6 @@ public Long getId() { return id; } - - public List getMigrateTo() { return migrateTo; } @@ -103,25 +96,17 @@ public String getMigrationType() { @Override public String getEventType() { - return EventTypes.EVENT_TEMPLATE_MIGRATE; + return EventTypes.EVENT_FILE_MIGRATE; } @Override public String getEventDescription() { - return "Attempting to migrate templates " + "from : " + this.getId() + " to: " + 
StringUtils.join(getMigrateTo(), ","); - } - - public Long getTemp() { - return temp; + return "Attempting to migrate files/data objects " + "from : " + this.getId() + " to: " + StringUtils.join(getMigrateTo(), ","); } @Override public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { - String message = "Migration still in progress"; MigrationResponse response = _imageStoreService.migrateData(this); - if (response.getMessage() == null) { - response.setMessage(message); - } response.setObjectName("imagestore"); this.setResponseObject(response); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java index b3e42113287a..d3ae9a91d1b6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java @@ -70,9 +70,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE ImageStore result = _storageService.updateImageStoreStatus(getId(), getReadonly()); ImageStoreResponse storeResponse = null; if (result != null) { - LOG.debug("PEARL - response received"); storeResponse = _responseGenerator.createImageStoreResponse(result); - LOG.debug("PEARL - store resp == "+storeResponse.getId() + " name: "+storeResponse.getName()+ " url"+storeResponse.getUrl()); storeResponse.setResponseName(getCommandName()+"response"); storeResponse.setObjectName("imagestore"); setResponseObject(storeResponse); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java index 
6a45fa8b319d..54d66bf9ac8f 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java @@ -26,5 +26,5 @@ public static enum MigrationPolicy { Balance, Complete } - MigrationResponse migrateData(Long srcDataStoreId, List destDatastores, String migrationPolicy, Long temp); + MigrationResponse migrateData(Long srcDataStoreId, List destDatastores, String migrationPolicy); } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index 336353f03db6..22037cc3b671 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -171,7 +171,7 @@ public boolean configure(String name, Map params) throws Configu } @Override - public MigrationResponse migrateData(Long srcDataStoreId, List destDatastores, String migrationPolicy, Long temp) { + public MigrationResponse migrateData(Long srcDataStoreId, List destDatastores, String migrationPolicy) { List files = new LinkedList<>(); int successCount = 0; boolean success = true; @@ -205,11 +205,6 @@ public int compare(DataObject o1, DataObject o2) { } }); - s_logger.debug("PEARL - sorted files"); - for (DataObject obj : files) { - s_logger.debug("PEARL - data object: " + obj.getDataStore().getName() + " Size : " + obj.getSize()); - } - if (files.isEmpty()) { return new MigrationResponse("No files in Image store "+srcDatastore.getId()+ " to migrate", migrationPolicy, true); } @@ -222,15 +217,11 @@ public int compare(DataObject o1, DataObject o2) { } storageCapacities.put(srcDataStoreId, new Pair<>(null, null)); - s_logger.debug("PEARL - 
before all"); - for (Map.Entry> entry : storageCapacities.entrySet()) { - s_logger.debug("PEARL - store id : " + entry.getKey() + " free capacity: " + entry.getValue().first() + " total cap: " + entry.getValue().second()); - } // If the migration policy is to completely migrate data from the given source Image Store, then set it's state // to readonly if (migrationPolicy.equals(ImageStoreService.MigrationPolicy.Complete.toString())) { - s_logger.debug("PEARL - setting source image store "+srcDatastore.getId()+ " to read-only"); + s_logger.debug("Setting source image store "+srcDatastore.getId()+ " to read-only"); storageService.updateImageStoreStatus(srcDataStoreId, true); } @@ -239,28 +230,17 @@ public int compare(DataObject o1, DataObject o2) { double threshold = ImageStoreImbalanceThreshold.value(); MigrationResponse response = null; - // TODO: core = max; & core = no of ssvms * concurrent/ssvm ThreadPoolExecutor executor = new ThreadPoolExecutor(numConcurrentCopyTasksPerSSVM , numConcurrentCopyTasksPerSSVM, 30, TimeUnit.MINUTES, new MigrateBlockingQueue<>(2)); - // TODO : return if meanstddev < threshold - s_logger.debug("PEARL - mean std deviation = " + meanstddev); - // TODO: uncomment when testing is completed -// if (meanstddev < threshold) { -// s_logger.debug("PEARL - mean std deviation of the storages is below threshold, no migration required"); -// response = new MigrationResponse("Migration not required as system seems balanced", migrationType, true); -// return response; -// } + if (meanstddev < threshold) { + s_logger.debug("mean std deviation of the image stores is below threshold, no migration required"); + response = new MigrationResponse("Migration not required as system seems balanced", migrationPolicy, true); + return response; + } List>> futures = new ArrayList<>(); while (true) { - s_logger.debug("PEARL - files size == " + files.size()); - s_logger.debug("PEARL - datastore dest size == " + destDatastores.size()); - s_logger.debug("PEARL - 
stores to capacity map == "); - for (Map.Entry> entry : storageCapacities.entrySet()) { - s_logger.debug("PEARL - store id : " + entry.getKey() + " free capacity: " + entry.getValue().first() + " total cap: " + entry.getValue().second()); - } - DataObject chosenFileForMigration = null; if (files.size() > 0) { chosenFileForMigration = files.remove(0); @@ -273,15 +253,10 @@ public int compare(DataObject o1, DataObject o2) { // If there aren't anymore files available for migration or no valid Image stores available for migration // end the migration process - destDatastoreId = temp; - s_logger.debug("PEARL - chosen file = "+ (chosenFileForMigration != null ? chosenFileForMigration.getId() : "null file")); - s_logger.debug("PEARL - destid "+ destDatastoreId); - s_logger.debug("PEARL - src id = "+ srcDatastore.getId()); if (chosenFileForMigration == null || destDatastoreId == null || destDatastoreId == srcDatastore.getId()) { - s_logger.debug("PEARL - migration completed "); if (destDatastoreId == srcDatastore.getId() && !files.isEmpty() ) { if (migrationPolicy.equals(ImageStoreService.MigrationPolicy.Balance.toString())) { - s_logger.debug("PEARL - src id = dest id"); + s_logger.debug("Migration completed : data stores have been balanced "); message = "Image stores have been balanced"; success = true; } else { @@ -297,30 +272,22 @@ public int compare(DataObject o1, DataObject o2) { } if (chosenFileForMigration.getSize() > storageCapacities.get(destDatastoreId).first()) { - s_logger.debug("PEARL - file " + chosenFileForMigration.getId() + " too large to be migrated to " + destDatastoreId); + s_logger.debug("file: " + chosenFileForMigration.getId() + " too large to be migrated to " + destDatastoreId); continue; } // If there is a benefit in migration of the chosen file to the destination store, then proceed with migration if (shouldMigrate(chosenFileForMigration, srcDatastore.getId(), destDatastoreId, storageCapacities, snapshotChains, migrationPolicy)) { Long fileSize = 
getFileSize(chosenFileForMigration, snapshotChains); - s_logger.debug("PEARL - in migrate decision function - yes"); - s_logger.debug("PEARL - current metrics = "); - for (Map.Entry> p : storageCapacities.entrySet()) { - s_logger.debug("PEARL - Datastore : " + p.getKey() + " free capacity: " + p.getValue().first() + " total capacity: " + p.getValue().second()); - } - storageCapacities = assumeMigrate(storageCapacities, srcDatastore.getId(), destDatastoreId, fileSize); long activeSsvms = activeSSVMCount(srcDatastore); long totalJobs = activeSsvms * numConcurrentCopyTasksPerSSVM; - s_logger.debug("PEARL - total jobs = "+ totalJobs); + // Increase thread pool size with increase in number of SSVMs if ( totalJobs > executor.getCorePoolSize()) { executor.setMaximumPoolSize((int) (totalJobs)); executor.setCorePoolSize((int) (totalJobs)); - s_logger.debug("PEARL - max pool size : "+ executor.getMaximumPoolSize()); - s_logger.debug("PEARL - core pool size : "+ executor.getCorePoolSize()); } MigrateDataTask task = new MigrateDataTask(chosenFileForMigration, srcDatastore, dataStoreManager.getDataStore(destDatastoreId, DataStoreRole.Image)); @@ -328,9 +295,8 @@ public int compare(DataObject o1, DataObject o2) { task.setSnapshotChains(snapshotChains); } futures.add((executor.submit(task))); - s_logger.debug("PEARL - migration of file " + chosenFileForMigration.getId() + " is done"); + s_logger.debug("Migration of file " + chosenFileForMigration.getId() + " is initiated"); } else { - s_logger.debug("PEARL - migration completed!"); if (migrationPolicy.equals(ImageStoreService.MigrationPolicy.Balance.toString())) { message = "Migration completed and has successfully balanced the data objects among stores: " + StringUtils.join(storageCapacities.keySet(), ","); } else { @@ -348,7 +314,8 @@ public int compare(DataObject o1, DataObject o2) { successCount++; } } catch ( InterruptedException | ExecutionException e) { - throw new CloudRuntimeException("Failed to get result"); + 
s_logger.warn("Failed to get result"); + continue; } } message += ". successful migrations: "+successCount; @@ -358,18 +325,13 @@ public int compare(DataObject o1, DataObject o2) { private Map> getStorageCapacities(Map> storageCapacities) { Map> capacities = new Hashtable<>(); for (Long storeId : storageCapacities.keySet()) { - s_logger.debug("PEARL - store ID = " + storeId); StorageStats stats = statsCollector.getStorageStats(storeId); if (stats != null) { if (storageCapacities.get(storeId) == null || storageCapacities.get(storeId).first() == null || storageCapacities.get(storeId).second() == null) { - s_logger.debug("PEARL - free caap : " + (stats.getCapacityBytes() - stats.getByteUsed())); - s_logger.debug("PEARL - total cap : " + stats.getCapacityBytes()); capacities.put(storeId, new Pair<>(stats.getCapacityBytes() - stats.getByteUsed(), stats.getCapacityBytes())); } else { long totalCapacity = stats.getCapacityBytes(); Long freeCapacity = totalCapacity - stats.getByteUsed(); - s_logger.debug("PEARL - pair value: " + storageCapacities.get(storeId)); - s_logger.debug("PEARL - free capacity = " + freeCapacity); if (freeCapacity >= storageCapacities.get(storeId).first()) { capacities.put(storeId, storageCapacities.get(storeId)); } else { @@ -380,10 +342,6 @@ private Map> getStorageCapacities(Map> p : capacities.entrySet()) { - s_logger.debug("PEARL - Datastore : " + p.getKey() + " free capacity: " + p.getValue().first() + " total capacity: " + p.getValue().second()); - } return capacities; } @@ -395,12 +353,7 @@ private Map> getStorageCapacities(Map> storageCapacities) { double[] freeCapacities = storageCapacities.values().stream().mapToDouble(x -> ((double) x.first() / x.second())).toArray(); - s_logger.debug("PEARL - free capcitites size :"); - for (double cap : freeCapacities) { - s_logger.debug("PEARL - cap : " + cap); - } double mean = calculateStorageMean(freeCapacities); - s_logger.debug("PEARL: - mean = " + mean); return 
(calculateStorageStandardDeviation(freeCapacities, mean) / mean); } @@ -409,7 +362,6 @@ private double getStandardDeviation(Map> storageCapacitie * an informed decision of picking the datastore with maximum free capactiy for migration */ private List sortDataStores(Map> storageCapacities) { - s_logger.debug("PEARL - storage capacity size: " + storageCapacities.size()); List>> list = new LinkedList>>((storageCapacities.entrySet())); @@ -421,14 +373,9 @@ public int compare(Map.Entry> e1, Map.Entry> temp = new LinkedHashMap<>(); for (Map.Entry> value : list) { - s_logger.debug("PEARL - list : " + value.getKey() + " pair val: " + value.getValue()); temp.put(value.getKey(), value.getValue()); } - s_logger.debug("PEARL - temp size: " + temp.size()); - for (Map.Entry> e : temp.entrySet()) { - s_logger.debug("PEARL - storeID : " + e.getKey() + " pair val: " + e.getValue()); - } return new ArrayList<>(temp.keySet()); } @@ -475,44 +422,40 @@ private Long getFileSize(DataObject file, Map> storageCapacities, Map, Long>> snapshotChains, String migrationPolicy) { - //private boolean shouldMigrate(DummyObject chosenFile, Long srcDatastoreId, Long destDatastoreId, Map> storageCapacities, String policy) { - return true; -// if (migrationPolicy == MigrationPolicy.Balance.toString()) { -// double meanStdDevCurrent = getStandardDeviation(storageCapacities); -// -// s_logger.debug("PEARL - meanstd deviation before migration = " + meanStdDevCurrent); -// Long fileSize = getFileSize(chosenFile, snapshotChains) -// Map> proposedCapacities = assumeMigrate(storageCapacities, srcDatastoreId, destDatastoreId, fileSize); -// double meanStdDevAfter = getStandardDeviation(proposedCapacities); -// -// // calculateStorageImbalanceAfterSupposedMigration(stores, storesToCapacityMap, meanStdDeviation, fileSize); -// -// s_logger.debug("PEARL - meanstd deviation after migration = " + meanStdDevAfter); -// -//// if (meanStdDevAfter > meanStdDevCurrent) { -//// s_logger.debug("PEARL - migrating the file 
doesn't prove to be beneficial, skipping migration"); -//// return false; -//// } -// -// Double threshold = ImageStoreImbalanceThreshold.value(); -// if (meanStdDevCurrent > threshold && storageCapacityBelowThreshold(storageCapacities, destDatastoreId)) { -// return true; -// } -// } else { -// if (storageCapacityBelowThreshold(storageCapacities, destDatastoreId)) { -// return true; -// } -// } -// return false; + + if (migrationPolicy == MigrationPolicy.Balance.toString()) { + double meanStdDevCurrent = getStandardDeviation(storageCapacities); + + Long fileSize = getFileSize(chosenFile, snapshotChains); + Map> proposedCapacities = assumeMigrate(storageCapacities, srcDatastoreId, destDatastoreId, fileSize); + double meanStdDevAfter = getStandardDeviation(proposedCapacities); + + + + if (meanStdDevAfter > meanStdDevCurrent) { + s_logger.debug("migrating the file doesn't prove to be beneficial, skipping migration"); + return false; + } + + Double threshold = ImageStoreImbalanceThreshold.value(); + if (meanStdDevCurrent > threshold && storageCapacityBelowThreshold(storageCapacities, destDatastoreId)) { + return true; + } + } else { + if (storageCapacityBelowThreshold(storageCapacities, destDatastoreId)) { + return true; + } + } + return false; } private boolean storageCapacityBelowThreshold(Map> storageCapacities, Long destStoreId) { Pair imageStoreCapacity = storageCapacities.get(destStoreId); if (imageStoreCapacity != null && (imageStoreCapacity.first() / (imageStoreCapacity.second() * 1.0)) <= imageStoreCapacityThreshold) { - s_logger.debug("PEARL - image store has sufficient capacity to proceed with migration of file"); + s_logger.debug("image store: " + destStoreId + " has sufficient capacity to proceed with migration of file"); return true; } - s_logger.debug("PEARL - image store capacity threshold exceeded, migration not possible"); + s_logger.debug("Image store capacity threshold exceeded, migration not possible"); return false; } @@ -587,15 +530,7 @@ private 
List getAllValidSnapshotChains(DataStore srcDataStore, Map, Long>(chain, getSizeForChain(chain))); } - //Log - for (DataObject snap: snapshotChains.keySet()) { - s_logger.debug("PEARL - parent = "+snap); - List chain = snapshotChains.get(snap).first(); - s_logger.debug("PEARL - chain: "); - for (int i =0;i "); - } - } + return (List) (List) files; } @@ -664,7 +599,6 @@ public DataObject getFile() { @Override public AsyncCallFuture call() throws Exception { - s_logger.debug("PEARL - running migration TASK"); return secStgSrv.migrateData(file, srcDataStore, destDataStore, snapshotChain); } } diff --git a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java index f7a5feaad39f..ac6c8555da96 100644 --- a/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java +++ b/engine/storage/datamotion/src/main/java/org/apache/cloudstack/storage/motion/DataMotionServiceImpl.java @@ -59,7 +59,7 @@ public void copyAsync(DataObject srcData, DataObject destData, Host destHost, As if (srcData.getDataStore() == null || destData.getDataStore() == null) { throw new CloudRuntimeException("can't find data store"); } - LOGGER.debug("PEARL - getting copyasync driver!!"); + if (srcData.getDataStore().getDriver().canCopy(srcData, destData)) { srcData.getDataStore().getDriver().copyAsync(srcData, destData, callback); return; diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java index cd4e88eddc71..86d747ca3fa0 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java @@ -72,29 +72,22 @@ public 
AsyncCallFuture migrateData(DataObject srcDataObject, D DataObject destDataObject = null; try { if (srcDataObject instanceof SnapshotInfo && snapshotChain.keySet().contains(srcDataObject)) { - s_logger.debug("PEARL - snapshot instance with a chain of snaps: size"+ snapshotChain.get(srcDataObject).first().size()); for (SnapshotInfo snapshotInfo : snapshotChain.get(srcDataObject).first()) { destDataObject = destDatastore.create(snapshotInfo); snapshotInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); destDataObject.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); - // migrateJob(future, snapshotInfo, destDataObject, destDatastore); - s_logger.debug("PEARL - snap name: "+ snapshotInfo.getName()); MigrateDataContext context = new MigrateDataContext(null, future, snapshotInfo, destDataObject, destDatastore); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().migrateDataCallBack(null, null)).setContext(context); - s_logger.debug(snapshotInfo.getDataStore().getTO().toString()); motionSrv.copyAsync(snapshotInfo, destDataObject, caller); } } else { - s_logger.debug("PEARL - not a snapshot instance"); destDataObject = destDatastore.create(srcDataObject); srcDataObject.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); destDataObject.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); - //migrateJob(future, srcDataObject, destDataObject, destDatastore); MigrateDataContext context = new MigrateDataContext(null, future, srcDataObject, destDataObject, destDatastore); AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().migrateDataCallBack(null, null)).setContext(context); - s_logger.debug(srcDataObject.getDataStore().getTO().toString()); motionSrv.copyAsync(srcDataObject, destDataObject, caller); } } catch (Exception e) { @@ -110,7 +103,6 @@ public AsyncCallFuture migrateData(DataObject 
srcDataObject, D } // protected void migrateJob(AsyncCallFuture future, DataObject srcDataObject, DataObject destDataObject, DataStore destDatastore) throws ExecutionException, InterruptedException { -// s_logger.debug("PEARL - in migrateJob() "); // MigrateDataContext context = new MigrateDataContext(null, future, srcDataObject, destDataObject, destDatastore); // AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); // caller.setCallback(caller.getTarget().migrateDataCallBack(null, null)).setContext(context); @@ -122,18 +114,15 @@ public AsyncCallFuture migrateData(DataObject srcDataObject, D * Callback function to handle state change of source and destination data objects based on the success or failure of the migrate task */ protected Void migrateDataCallBack(AsyncCallbackDispatcher callback, MigrateDataContext context) throws ExecutionException, InterruptedException { - s_logger.debug("PEARL - completed transfer - @ migrate callback"); DataObject srcData = context.srcData; DataObject destData = context.destData; - s_logger.debug("PEARL - src data = "+srcData.getUri()); - s_logger.debug("PEARL - dest data = "+ destData.getUri()); CopyCommandResult result = callback.getResult(); AsyncCallFuture future = context.future; DataObjectResult res = new DataObjectResult(srcData); CopyCmdAnswer answer = (CopyCmdAnswer) result.getAnswer(); try { if (!answer.getResult()) { - s_logger.debug("PEARL - migration failed"); + s_logger.warn("Migration failed for "+srcData.getUuid()); res.setResult(result.getResult()); srcData.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); destData.processEvent(ObjectInDataStoreStateMachine.Event.MigrationFailed); @@ -144,10 +133,10 @@ protected Void migrateDataCallBack(AsyncCallbackDispatcher= maxConcurrentCopyOpsPerSSVM) { continue; } @@ -392,7 +391,6 @@ public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCa if (!sent) { // Picking endpoint with least number of copy commands running 
on it Long epId = ssvmWithLeastMigrateJobs(); - s_logger.debug("PEARL - edpoint : "+ epId); EndPoint endPoint = _defaultEpSelector.getEndPointFromHostId(epId); CommandExecLogVO execLog = new CommandExecLogVO(epId, _secStorageVmDao.findByInstanceName(hostDao.findById(epId).getName()).getId(), cmd.getClass().getSimpleName(), 1); Long cmdExecId = _cmdExecLogDao.persist(execLog).getId(); @@ -479,7 +477,7 @@ private Integer getCopyCmdsCountToSpecificSSVM(Long ssvmId) { } private Long ssvmWithLeastMigrateJobs() { - s_logger.debug("PEARL - picking ssvm from the pool with least commands running on it"); + s_logger.debug("Picking ssvm from the pool with least commands running on it"); String query = "select host_id, count(*) from cmd_exec_log group by host_id order by 2 limit 1;"; TransactionLegacy txn = TransactionLegacy.currentTxn(); diff --git a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java index 001590258a07..cb02c8240333 100644 --- a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java @@ -108,7 +108,7 @@ public MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd) { return new MigrationResponse(message, migrationType, false); } - return stgService.migrateData(srcImgStoreId, destDatastores, migrationType, cmd.getTemp()); + return stgService.migrateData(srcImgStoreId, destDatastores, migrationType); } diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java index 35cdf37d1ea9..07d9971aa62d 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java +++ 
b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java @@ -160,8 +160,6 @@ public Pair scanPool(Long pool) { Integer maxSsvms = (hostsCount < MaxNumberOfSsvmsForMigration.value()) ? hostsCount : MaxNumberOfSsvmsForMigration.value(); currentTime = DateUtil.currentGMTTime().getTime(); - s_logger.debug("PEARL - current time: "+ currentTime); - s_logger.debug("PEARL - next spawn time: "+nextSpawnTime); if (alreadyRunning.size() * _capacityPerSSVM - activeCmds.size() < _standbyCapacity) { s_logger.info("secondary storage command execution standby capactiy low (running VMs: " + alreadyRunning.size() + ", active cmds: " + activeCmds.size() + @@ -174,7 +172,7 @@ else if (!copyCmdsInPipeline.isEmpty() && copyCmdsInPipeline.size() >= alreadyR (((currentTime - copyCmdsInPipeline.get(alreadyRunning.size() - 1).getCreated().getTime()) /1000 > nMaxExecutionMinutes/2)) && (currentTime > nextSpawnTime) && alreadyRunning.size() <= maxSsvms) { nextSpawnTime = currentTime + nMaxExecutionMinutes * 1000; - s_logger.debug("PEARL - scale SSVM!!!"); + s_logger.debug("scaling SSVM"); return new Pair(AfterScanAction.expand, SecondaryStorageVm.Role.templateProcessor); } diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index 9691f610f46e..4e15721d45f6 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -1289,13 +1289,6 @@ protected File findFile(String path) { } protected Answer copyFromNfsToNfs(CopyCommand cmd) { - s_logger.info("PEARL - copying from nfs to nfs"); - try { - long randSleep = (long) 
(Math.random() * (((600 - 300) + 1) + 300 * 1000)); - Thread.sleep(randSleep); - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); - } final DataTO srcData = cmd.getSrcTO(); final DataTO destData = cmd.getDestTO(); DataStoreTO srcDataStore = srcData.getDataStore(); @@ -1303,14 +1296,9 @@ protected Answer copyFromNfsToNfs(CopyCommand cmd) { DataStoreTO destDataStore = destData.getDataStore(); final NfsTO destStore = (NfsTO) destDataStore; try { - s_logger.info("PEARL - src store url = "+ srcStore.getUrl()); - s_logger.info("PEARL - dest store url = "+ destStore.getUrl()); File srcFile = new File(getDir(srcStore.getUrl(), _nfsVersion), srcData.getPath()); - s_logger.info("PEARL - src file = "+ srcFile.getPath() + " src filename: "+ srcFile.getName()); File destFile = new File(getDir(destStore.getUrl(), _nfsVersion), destData.getPath()); - s_logger.info("PEARL - dest file = "+destFile.getPath()+ " dest filename = "+destFile.getName()); ImageFormat format = getTemplateFormat(srcFile.getName()); - s_logger.info("PEARL - file format = "+format); if (srcFile == null) { return new CopyCmdAnswer("Can't find src file:" + srcFile); @@ -1326,13 +1314,10 @@ protected Answer copyFromNfsToNfs(CopyCommand cmd) { destDir = new File(destFile.getParent()); } - s_logger.info("PEARL - src dir == " + srcDir); - s_logger.info("PEARL - dest dir == " + destDir); try { FileUtils.copyDirectory((srcDir == null ? srcFile : srcDir), (destDir == null? 
destFile : destDir)); - //FileUtils.copyFile(srcFile, destFile); } catch (IOException e) { - String msg = "PEARL - Failed to copy file to destination"; + String msg = "Failed to copy file to destination"; s_logger.info(msg); return new CopyCmdAnswer(msg); } @@ -1341,24 +1326,19 @@ protected Answer copyFromNfsToNfs(CopyCommand cmd) { try { FileUtils.copyFile(srcFile, destFile); } catch (IOException e) { - String msg = "PEARL - Failed to copy file to destination"; + String msg = "Failed to copy file to destination"; s_logger.info(msg); return new CopyCmdAnswer(msg); } } DataTO retObj = null; - // TODO: remove it maybe ? if (destData.getObjectType() == DataObjectType.TEMPLATE) { TemplateObjectTO newTemplate = new TemplateObjectTO(); - s_logger.info("PEARL - src filename = "+ srcFile.getName() + " dest install path = "+destData.getPath() + File.separator + srcFile.getName()); newTemplate.setPath(destData.getPath() + File.separator + srcFile.getName()); newTemplate.setSize(getVirtualSize(srcFile, format)); - s_logger.info("PEARL - file size = "+ getVirtualSize(srcFile, format)); newTemplate.setPhysicalSize(srcFile.length()); - s_logger.info("PEARL - file phy size = "+ getVirtualSize(srcFile, format)); newTemplate.setFormat(format); - retObj = newTemplate; } else if (destData.getObjectType() == DataObjectType.VOLUME) { VolumeObjectTO newVol = new VolumeObjectTO(); From 87f9360d76d514c35fe12b7dfe050bf6a47e7016 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 4 May 2020 10:22:17 +0530 Subject: [PATCH 03/40] Refactored code --- .../api/storage/DataStoreManager.java | 2 -- .../secstorage/CommandExecLogDaoImpl.java | 1 - .../datastore/db/ImageStoreDaoImpl.java | 2 -- .../ImageStoreProviderManagerImpl.java | 25 ------------------- .../datastore/DataStoreManagerImpl.java | 5 ---- .../datastore/ImageStoreProviderManager.java | 7 ------ .../PremiumSecondaryStorageManagerImpl.java | 7 ++---- .../resource/NfsSecondaryStorageResource.java | 1 - 8 files changed, 2 
insertions(+), 48 deletions(-) diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreManager.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreManager.java index 942294f367ba..80e3ce11c759 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreManager.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/DataStoreManager.java @@ -54,6 +54,4 @@ public interface DataStoreManager { List listImageCacheStores(); boolean isRegionStore(DataStore store); - - List orderImageStoresOnFreeCapacity(List stores); } diff --git a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java index f5cfc1ed2839..048064e142cd 100644 --- a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java @@ -37,7 +37,6 @@ public CommandExecLogDaoImpl() { ExpungeSearch.and("created", ExpungeSearch.entity().getCreated(), Op.LT); ExpungeSearch.done(); - CommandSearch = createSearchBuilder(); CommandSearch.and("host_id", CommandSearch.entity().getHostId(), Op.EQ); CommandSearch.and("command_name", CommandSearch.entity().getCommandName(), Op.EQ); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java index 006d31e7df06..5b73ec2a5e19 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java @@ -102,8 +102,6 @@ public List findByScopeExcludingReadOnly(ZoneScope scope) { sc.addAnd("scope", SearchCriteria.Op.SC, scc); sc.addAnd("readonly", SearchCriteria.Op.EQ, 
Boolean.FALSE); } - // we should return all image stores if cross-zone scope is passed - // (scopeId = null) return listBy(sc); } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java index 90089757a927..976077daccb5 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java @@ -188,31 +188,6 @@ public int compare(DataStore store1, DataStore store2) { return null; } - @Override - public List orderImageStoresOnFreeCapacity(List imageStores) { - List stores = new ArrayList<>(); - if (imageStores.size() > 1) { - imageStores.sort(new Comparator() { // Sort data stores based on free capacity - @Override - public int compare(DataStore store1, DataStore store2) { - return Long.compare(_statsCollector.imageStoreCurrentFreeCapacity(store1), - _statsCollector.imageStoreCurrentFreeCapacity(store2)); - } - }); - for (DataStore imageStore : imageStores) { - // Return image store if used percentage is less then threshold value i.e. 90%. 
- if (_statsCollector.imageStoreHasEnoughCapacity(imageStore)) { - stores.add(imageStore); - } - } - } else if (imageStores.size() == 1) { - if (_statsCollector.imageStoreHasEnoughCapacity(imageStores.get(0))) { - stores.add(imageStores.get(0)); - } - } - return stores; - } - @Override public List listImageStoresWithFreeCapacity(List imageStores) { List stores = new ArrayList<>(); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java index ceb26b38c2ef..bf491d9d7510 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java @@ -130,11 +130,6 @@ public boolean isRegionStore(DataStore store) { return false; } - @Override - public List orderImageStoresOnFreeCapacity(List stores) { - return imageDataStoreMgr.orderImageStoresOnFreeCapacity(stores); - } - @Override public DataStore getPrimaryDataStore(long storeId) { return primaryStoreMgr.getPrimaryDataStore(storeId); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/datastore/ImageStoreProviderManager.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/datastore/ImageStoreProviderManager.java index cb46d480f291..5d0432292aa9 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/datastore/ImageStoreProviderManager.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/datastore/ImageStoreProviderManager.java @@ -78,11 +78,4 @@ public interface ImageStoreProviderManager { * @return the list of DataStore which have free capacity */ List listImageStoresWithFreeCapacity(List imageStores); - - /** - * Returns the provided list of Datastores in descending order of their free capacity - * @param imageStores list of image stores that need to 
be arranged - * @return sorted list - */ - List orderImageStoresOnFreeCapacity(List imageStores); } diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java index 07d9971aa62d..86afdda2f9d1 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java @@ -97,7 +97,6 @@ public boolean configure(String name, Map params) throws Configu activeCopyCommandSearch.and("command_name", activeCopyCommandSearch.entity().getCommandName(), Op.EQ); activeCopyCommandSearch.join("hostSearch", hostSearch, activeCopyCommandSearch.entity().getHostId(), hostSearch.entity().getId(), JoinType.INNER); - hostSearch.done(); activeCommandSearch.done(); activeCopyCommandSearch.done(); @@ -151,9 +150,7 @@ public Pair scanPool(Long pool) { alreadyRunning = _secStorageVmDao.getSecStorageVmListInStates(null, dataCenterId, State.Running, State.Migrating, State.Starting); - List activeCmds = findActiveCommands(dataCenterId, cutTime, null); - - nMaxExecutionMinutes = 240; + List activeCmds = findActiveCommands(dataCenterId, cutTime); List copyCmdsInPipeline = findAllActiveCopyCommands(dataCenterId, cutTime); Integer hostsCount = _hostDao.countAllByType(Host.Type.Routing); @@ -209,7 +206,7 @@ public Pair assignSecStorageVm(long zoneId, Comman return null; } - private List findActiveCommands(long dcId, Date cutTime, String cmdName) { + private List findActiveCommands(long dcId, Date cutTime) { SearchCriteria sc = activeCommandSearch.create(); sc.setParameters("created", cutTime); sc.setJoinParameters("hostSearch", "dc", dcId); diff --git 
a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index 4e15721d45f6..e00b7a08960b 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -2586,7 +2586,6 @@ public PingCommand getCurrentStatus(final long id) { @Override public boolean configure(String name, Map params) throws ConfigurationException { - // TODO: create GS for number of threads for migrate job _eth1ip = (String)params.get("eth1ip"); _eth1mask = (String)params.get("eth1mask"); if (_eth1ip != null) { // can only happen inside service vm From e4b9cca959db74b9904003fc50a5c8002061df7b Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 5 May 2020 15:43:38 +0530 Subject: [PATCH 04/40] Refactored code --- .../com/cloud/storage/ImageStoreService.java | 2 +- .../service/StorageOrchestrationService.java | 8 +-- .../orchestration/StorageOrchestrator.java | 58 +++++++++++++------ .../datastore/db/SnapshotDataStoreDao.java | 3 + .../storage/snapshot/SnapshotObject.java | 10 ++-- .../image/BaseImageStoreDriverImpl.java | 2 +- .../image/db/SnapshotDataStoreDaoImpl.java | 20 ++++++- .../java/com/cloud/configuration/Config.java | 4 +- .../cloud/storage/ImageStoreServiceImpl.java | 13 ++++- .../PremiumSecondaryStorageManagerImpl.java | 15 ++--- 10 files changed, 91 insertions(+), 44 deletions(-) diff --git a/api/src/main/java/com/cloud/storage/ImageStoreService.java b/api/src/main/java/com/cloud/storage/ImageStoreService.java index 049a9f807a97..1189751d6c03 100644 --- a/api/src/main/java/com/cloud/storage/ImageStoreService.java +++ b/api/src/main/java/com/cloud/storage/ImageStoreService.java @@ -23,7 +23,7 @@ public 
interface ImageStoreService { public static enum MigrationPolicy { - Balance, Complete + BALANCE, COMPLETE } MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd); } diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java index 54d66bf9ac8f..1227c6bb7a5b 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java @@ -21,10 +21,8 @@ import org.apache.cloudstack.api.response.MigrationResponse; -public interface StorageOrchestrationService { - public static enum MigrationPolicy { - Balance, Complete - } +import com.cloud.storage.ImageStoreService.MigrationPolicy; - MigrationResponse migrateData(Long srcDataStoreId, List destDatastores, String migrationPolicy); +public interface StorageOrchestrationService { + MigrationResponse migrateData(Long srcDataStoreId, List destDatastores, MigrationPolicy migrationPolicy); } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index 22037cc3b671..99952e54a523 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -21,6 +21,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.Comparator; +import java.util.Date; import java.util.HashMap; import java.util.Hashtable; import java.util.LinkedHashMap; @@ -71,9 +72,8 @@ import com.cloud.host.dao.HostDao; import com.cloud.server.StatsCollector; import 
com.cloud.storage.DataStoreRole; -import com.cloud.storage.ImageStoreService; +import com.cloud.storage.ImageStoreService.MigrationPolicy; import com.cloud.storage.SnapshotVO; -import com.cloud.storage.Storage; import com.cloud.storage.StorageService; import com.cloud.storage.StorageStats; import com.cloud.storage.VMTemplateVO; @@ -166,18 +166,18 @@ public boolean offer(T task) { @Override public boolean configure(String name, Map params) throws ConfigurationException { - numConcurrentCopyTasksPerSSVM = NumbersUtil.parseInt(configDao.getValue(Config.SecStorageCopyCmdMaxSessions.key()), 2); + numConcurrentCopyTasksPerSSVM = NumbersUtil.parseInt(configDao.getValue(Config.SecStorageMaxMigrateSessions.key()), 2); return true; } @Override - public MigrationResponse migrateData(Long srcDataStoreId, List destDatastores, String migrationPolicy) { + public MigrationResponse migrateData(Long srcDataStoreId, List destDatastores, MigrationPolicy migrationPolicy) { List files = new LinkedList<>(); int successCount = 0; boolean success = true; String message = null; - if (migrationPolicy.equals(MigrationPolicy.Complete.toString())) { + if (migrationPolicy == MigrationPolicy.COMPLETE) { if (!filesReady(srcDataStoreId)) { throw new CloudRuntimeException("Complete migration failed as there are data objects which are not Ready"); } @@ -206,7 +206,7 @@ public int compare(DataObject o1, DataObject o2) { }); if (files.isEmpty()) { - return new MigrationResponse("No files in Image store "+srcDatastore.getId()+ " to migrate", migrationPolicy, true); + return new MigrationResponse("No files in Image store "+srcDatastore.getId()+ " to migrate", migrationPolicy.toString(), true); } // Create capacity class with free and total space, maybe id of ds too and use that as the value @@ -220,7 +220,7 @@ public int compare(DataObject o1, DataObject o2) { // If the migration policy is to completely migrate data from the given source Image Store, then set it's state // to readonly - if 
(migrationPolicy.equals(ImageStoreService.MigrationPolicy.Complete.toString())) { + if (migrationPolicy == MigrationPolicy.COMPLETE) { s_logger.debug("Setting source image store "+srcDatastore.getId()+ " to read-only"); storageService.updateImageStoreStatus(srcDataStoreId, true); } @@ -230,11 +230,11 @@ public int compare(DataObject o1, DataObject o2) { double threshold = ImageStoreImbalanceThreshold.value(); MigrationResponse response = null; - ThreadPoolExecutor executor = new ThreadPoolExecutor(numConcurrentCopyTasksPerSSVM , numConcurrentCopyTasksPerSSVM, 30, TimeUnit.MINUTES, new MigrateBlockingQueue<>(2)); - + ThreadPoolExecutor executor = new ThreadPoolExecutor(numConcurrentCopyTasksPerSSVM , numConcurrentCopyTasksPerSSVM, 30, TimeUnit.MINUTES, new MigrateBlockingQueue<>(numConcurrentCopyTasksPerSSVM)); + Date start = new Date(); if (meanstddev < threshold) { s_logger.debug("mean std deviation of the image stores is below threshold, no migration required"); - response = new MigrationResponse("Migration not required as system seems balanced", migrationPolicy, true); + response = new MigrationResponse("Migration not required as system seems balanced", migrationPolicy.toString(), true); return response; } @@ -255,7 +255,7 @@ public int compare(DataObject o1, DataObject o2) { // end the migration process if (chosenFileForMigration == null || destDatastoreId == null || destDatastoreId == srcDatastore.getId()) { if (destDatastoreId == srcDatastore.getId() && !files.isEmpty() ) { - if (migrationPolicy.equals(ImageStoreService.MigrationPolicy.Balance.toString())) { + if (migrationPolicy == MigrationPolicy.BALANCE) { s_logger.debug("Migration completed : data stores have been balanced "); message = "Image stores have been balanced"; success = true; @@ -297,7 +297,7 @@ public int compare(DataObject o1, DataObject o2) { futures.add((executor.submit(task))); s_logger.debug("Migration of file " + chosenFileForMigration.getId() + " is initiated"); } else { - if 
(migrationPolicy.equals(ImageStoreService.MigrationPolicy.Balance.toString())) { + if (migrationPolicy == MigrationPolicy.BALANCE) { message = "Migration completed and has successfully balanced the data objects among stores: " + StringUtils.join(storageCapacities.keySet(), ","); } else { message = "Complete migration failed. Please set the source Image store to read-write mode if you want to continue using it"; @@ -306,6 +306,30 @@ public int compare(DataObject o1, DataObject o2) { break; } } + Date end = new Date(); + // Migrate snapshots created during the migration process + List snaps = snapshotDataStoreDao.findSnapshots(srcDataStoreId, start, end); + if (!snaps.isEmpty()) { + for (SnapshotDataStoreVO snap : snaps) { + SnapshotVO snapshotVO = snapshotDao.findById(snap.getSnapshotId()); + SnapshotInfo snapshotInfo = snapshotFactory.getSnapshot(snapshotVO.getSnapshotId(), DataStoreRole.Image); + SnapshotInfo parentSnapshot = snapshotInfo.getParent(); + + if (parentSnapshot == null && migrationPolicy == MigrationPolicy.COMPLETE) { + List dstores = sortDataStores(storageCapacities); + Long storeId = dstores.get(0); + if (storeId == srcDataStoreId) { + storeId = dstores.get(1); + } + DataStore datastore = dataStoreManager.getDataStore(storeId, DataStoreRole.Image); + futures.add(executor.submit(new MigrateDataTask(snapshotInfo, srcDatastore, datastore))); + } + if (parentSnapshot != null) { + DataStore parentDS = dataStoreManager.getDataStore(parentSnapshot.getDataStore().getId(), DataStoreRole.Image); + futures.add(executor.submit(new MigrateDataTask(snapshotInfo, srcDatastore, parentDS))); + } + } + } for (Future> future : futures) { try { @@ -319,7 +343,7 @@ public int compare(DataObject o1, DataObject o2) { } } message += ". 
successful migrations: "+successCount; - return new MigrationResponse(message, migrationPolicy, success); + return new MigrationResponse(message, migrationPolicy.toString(), success); } private Map> getStorageCapacities(Map> storageCapacities) { @@ -421,17 +445,15 @@ private Long getFileSize(DataObject file, Map> storageCapacities, - Map, Long>> snapshotChains, String migrationPolicy) { + Map, Long>> snapshotChains, MigrationPolicy migrationPolicy) { - if (migrationPolicy == MigrationPolicy.Balance.toString()) { + if (migrationPolicy == MigrationPolicy.BALANCE) { double meanStdDevCurrent = getStandardDeviation(storageCapacities); Long fileSize = getFileSize(chosenFile, snapshotChains); Map> proposedCapacities = assumeMigrate(storageCapacities, srcDatastoreId, destDatastoreId, fileSize); double meanStdDevAfter = getStandardDeviation(proposedCapacities); - - if (meanStdDevAfter > meanStdDevCurrent) { s_logger.debug("migrating the file doesn't prove to be beneficial, skipping migration"); return false; @@ -496,7 +518,7 @@ private List getAllValidTemplates(DataStore srcDataStore) { List templates = templateDataStoreDao.listByStoreId(srcDataStore.getId()); for (TemplateDataStoreVO template : templates) { VMTemplateVO templateVO = templateDao.findById(template.getTemplateId()); - if (template.getState() == ObjectInDataStoreStateMachine.State.Ready && !templateVO.isPublicTemplate() && templateVO.getTemplateType() != Storage.TemplateType.SYSTEM) { + if (template.getState() == ObjectInDataStoreStateMachine.State.Ready && !templateVO.isPublicTemplate()) { files.add(templateFactory.getTemplate(template.getTemplateId(), srcDataStore)); } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java index 819cb7bc6cea..494974122551 100644 --- 
a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/SnapshotDataStoreDao.java @@ -16,6 +16,7 @@ // under the License. package org.apache.cloudstack.storage.datastore.db; +import java.util.Date; import java.util.List; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; @@ -71,4 +72,6 @@ public interface SnapshotDataStoreDao extends GenericDao listAllByVolumeAndDataStore(long volumeId, DataStoreRole role); List listByState(ObjectInDataStoreStateMachine.State... states); + + List findSnapshots(Long storeId, Date start, Date end); } diff --git a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java index df5bc8174d0f..f107343f0def 100644 --- a/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java +++ b/engine/storage/snapshot/src/main/java/org/apache/cloudstack/storage/snapshot/SnapshotObject.java @@ -138,12 +138,12 @@ public List getChildren() { sc.and(sc.entity().getState(), Op.NIN, State.Destroying, State.Destroyed, State.Error); sc.and(sc.entity().getParentSnapshotId(), Op.EQ, getId()); List vos = sc.list(); - if (vos == null) { - return null; - } + List children = new ArrayList<>(); - for (SnapshotDataStoreVO vo : vos ) { - children.add(snapshotFactory.getSnapshot(vo.getId(), store)); + if (vos != null) { + for (SnapshotDataStoreVO vo : vos) { + children.add(snapshotFactory.getSnapshot(vo.getSnapshotId(), DataStoreRole.Image)); + } } return children; } diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java index 02684a412fae..c22daceaffc1 100644 --- 
a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java @@ -362,7 +362,7 @@ public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCa (srcdata.getType() == DataObjectType.VOLUME && destData.getType() == DataObjectType.VOLUME)) { int nMaxExecutionMinutes = NumbersUtil.parseInt(configDao.getValue(Config.SecStorageCmdExecutionTimeMax.key()), 30); - int maxConcurrentCopyOpsPerSSVM = NumbersUtil.parseInt(configDao.getValue(Config.SecStorageCopyCmdMaxSessions.key()), 2); + int maxConcurrentCopyOpsPerSSVM = NumbersUtil.parseInt(configDao.getValue(Config.SecStorageMaxMigrateSessions.key()), 2); CopyCommand cmd = new CopyCommand(srcdata.getTO(), destData.getTO(), nMaxExecutionMinutes * 60 * 1000, true); Answer answer = null; diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java index c7358d1558b7..9c009d1c2ead 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/SnapshotDataStoreDaoImpl.java @@ -27,8 +27,6 @@ import javax.inject.Inject; import javax.naming.ConfigurationException; -import com.cloud.storage.DataStoreRole; -import com.cloud.storage.SnapshotVO; import org.apache.cloudstack.engine.subsystem.api.storage.DataObjectInStore; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine.Event; @@ -39,6 +37,8 @@ import org.springframework.stereotype.Component; import com.cloud.hypervisor.Hypervisor; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.SnapshotVO; import 
com.cloud.storage.dao.SnapshotDao; import com.cloud.utils.db.DB; import com.cloud.utils.db.Filter; @@ -65,6 +65,7 @@ public class SnapshotDataStoreDaoImpl extends GenericDaoBase stateSearch; private SearchBuilder parentSnapshotSearch; private SearchBuilder snapshotVOSearch; + private SearchBuilder snapshotCreatedSearch; public static ArrayList hypervisorsSupportingSnapshotsChaining = new ArrayList(); @@ -158,6 +159,11 @@ public boolean configure(String name, Map params) throws Configu snapshotVOSearch.and("volume_id", snapshotVOSearch.entity().getVolumeId(), SearchCriteria.Op.EQ); snapshotVOSearch.done(); + snapshotCreatedSearch = createSearchBuilder(); + snapshotCreatedSearch.and("store_id", snapshotCreatedSearch.entity().getDataStoreId(), Op.EQ); + snapshotCreatedSearch.and("created", snapshotCreatedSearch.entity().getCreated(), Op.BETWEEN); + snapshotCreatedSearch.done(); + return true; } @@ -461,6 +467,16 @@ public List listByState(ObjectInDataStoreStateMachine.State return listBy(sc, null); } + @Override + public List findSnapshots(Long storeId, Date start, Date end) { + SearchCriteria sc = snapshotCreatedSearch.create(); + sc.setParameters("store_id", storeId); + if (start != null && end != null) { + sc.setParameters("created", start, end); + } + return search(sc, null); + } + private boolean isSnapshotChainingRequired(long volumeId) { hypervisorsSupportingSnapshotsChaining.add(Hypervisor.HypervisorType.XenServer); diff --git a/server/src/main/java/com/cloud/configuration/Config.java b/server/src/main/java/com/cloud/configuration/Config.java index b1ec5bd654b0..81495546fc43 100644 --- a/server/src/main/java/com/cloud/configuration/Config.java +++ b/server/src/main/java/com/cloud/configuration/Config.java @@ -1811,8 +1811,8 @@ public enum Config { SSVMPSK("Hidden", ManagementServer.class, String.class, "upload.post.secret.key", "", "PSK with SSVM", null), - SecStorageCopyCmdMaxSessions( - "Advanced", AgentManager.class, Integer.class, 
"secstorage.cpy.cmd.max.sessions","2","The max number of concurrent copy command execution sessions that an SSVM can handle",null); + SecStorageMaxMigrateSessions( + "Advanced", AgentManager.class, Integer.class, "secstorage.max.migrate.sessions","2","The max number of concurrent copy command execution sessions that an SSVM can handle",null); private final String _category; private final Class _componentClass; diff --git a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java index cb02c8240333..9291321e9576 100644 --- a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java @@ -31,6 +31,7 @@ import org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; +import org.apache.commons.lang3.EnumUtils; import org.apache.log4j.Logger; import com.cloud.utils.component.ManagerBase; @@ -68,7 +69,13 @@ public MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd) { Long srcImgStoreId = cmd.getId(); ImageStoreVO srcImageVO = imageStoreDao.findById(srcImgStoreId); List destImgStoreIds = cmd.getMigrateTo(); - String migrationType = cmd.getMigrationType(); + String migrationType = cmd.getMigrationType().toUpperCase(); + + if (!EnumUtils.isValidEnum(MigrationPolicy.class, migrationType)) { + throw new CloudRuntimeException("Not a valid migration policy"); + } + + MigrationPolicy policy = MigrationPolicy.valueOf(migrationType); String message = null; @@ -105,10 +112,10 @@ public MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd) { if (isMigrateJobRunning()){ message = "A migrate job is in progress, please try again later..."; - return new MigrationResponse(message, migrationType, false); + return new MigrationResponse(message, policy.toString(), false); } - return 
stgService.migrateData(srcImgStoreId, destDatastores, migrationType); + return stgService.migrateData(srcImgStoreId, destDatastores, policy); } diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java index 86afdda2f9d1..b74bb300a754 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java @@ -57,9 +57,8 @@ public class PremiumSecondaryStorageManagerImpl extends SecondaryStorageManagerI private int migrateCapPerSSVM = DEFAULT_MIGRATE_SS_VM_CAPACITY; private int _standbyCapacity = SecondaryStorageVmManager.DEFAULT_STANDBY_CAPACITY; private int _maxExecutionTimeMs = 1800000; - int nMaxExecutionMinutes = 120; long currentTime = DateUtil.currentGMTTime().getTime(); - long nextSpawnTime = currentTime + (nMaxExecutionMinutes *1000) ; + long nextSpawnTime = currentTime + _maxExecutionTimeMs; private List migrationSSVMS = new ArrayList<>(); @Inject @@ -83,6 +82,7 @@ public boolean configure(String name, Map params) throws Configu int nMaxExecutionMinutes = NumbersUtil.parseInt(_configDao.getValue(Config.SecStorageCmdExecutionTimeMax.key()), 30); _maxExecutionTimeMs = nMaxExecutionMinutes * 60 * 1000; + nextSpawnTime = currentTime + _maxExecutionTimeMs; hostSearch = _hostDao.createSearchBuilder(); hostSearch.and("dc", hostSearch.entity().getDataCenterId(), Op.EQ); @@ -149,11 +149,12 @@ public Pair scanPool(Long pool) { } alreadyRunning = _secStorageVmDao.getSecStorageVmListInStates(null, dataCenterId, State.Running, State.Migrating, State.Starting); - List activeCmds = findActiveCommands(dataCenterId, cutTime); - + // Find running copy / 
migrate commands running List copyCmdsInPipeline = findAllActiveCopyCommands(dataCenterId, cutTime); + // Count of total hosts Integer hostsCount = _hostDao.countAllByType(Host.Type.Routing); + // Maximum number of allowed SSVMs for migration task Integer maxSsvms = (hostsCount < MaxNumberOfSsvmsForMigration.value()) ? hostsCount : MaxNumberOfSsvmsForMigration.value(); currentTime = DateUtil.currentGMTTime().getTime(); @@ -166,14 +167,14 @@ public Pair scanPool(Long pool) { // Scale the number of SSVMs if the number of Copy operations is greater than the number of SSVMs running and the copy operation has been in pipeline for // more than half of the total time allocated for secondary storage operations else if (!copyCmdsInPipeline.isEmpty() && copyCmdsInPipeline.size() >= alreadyRunning.size() && - (((currentTime - copyCmdsInPipeline.get(alreadyRunning.size() - 1).getCreated().getTime()) /1000 > nMaxExecutionMinutes/2)) && + (((currentTime - copyCmdsInPipeline.get(alreadyRunning.size() - 1).getCreated().getTime()) > _maxExecutionTimeMs/2 )) && (currentTime > nextSpawnTime) && alreadyRunning.size() <= maxSsvms) { - nextSpawnTime = currentTime + nMaxExecutionMinutes * 1000; + nextSpawnTime = currentTime + _maxExecutionTimeMs/2; s_logger.debug("scaling SSVM"); return new Pair(AfterScanAction.expand, SecondaryStorageVm.Role.templateProcessor); } - // Scale down the number of SSVMs if the load on then has reduced + // Scale down the number of SSVMs if the load on them has reduced if ((copyCmdsInPipeline.size() < alreadyRunning.size() && alreadyRunning.size() * _capacityPerSSVM - activeCmds.size() > _standbyCapacity) && alreadyRunning.size() > 1) { Collections.reverse(alreadyRunning); for(SecondaryStorageVmVO vm : alreadyRunning) { From db6a434da5382f20c1d8435a4028cc6c41313145 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 6 May 2020 16:09:25 +0530 Subject: [PATCH 05/40] fine tuned scaling condition + allocation algorithm GS --- 
.../MigrateSecondaryStorageDataCmd.java | 8 ++-- .../orchestration/StorageOrchestrator.java | 4 +- .../storage/image/StorageServiceImpl.java | 10 +---- .../ImageStoreProviderManagerImpl.java | 40 ++++++++++++++++++- .../datastore/DataStoreManagerImpl.java | 1 - .../datastore/ImageStoreProviderManager.java | 2 + .../java/com/cloud/configuration/Config.java | 8 ++++ .../PremiumSecondaryStorageManagerImpl.java | 16 ++++---- 8 files changed, 65 insertions(+), 24 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java index cf08ceb481ae..ca38186ff289 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java @@ -25,7 +25,6 @@ import org.apache.cloudstack.api.BaseAsyncCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; -import org.apache.cloudstack.api.response.ImageStoreResponse; import org.apache.cloudstack.api.response.MigrationResponse; import org.apache.cloudstack.context.CallContext; import org.apache.log4j.Logger; @@ -57,7 +56,7 @@ public class MigrateSecondaryStorageDataCmd extends BaseAsyncCmd { @Parameter(name = ApiConstants.FROM, type = CommandType.UUID, - entityType = ImageStoreResponse.class, + entityType = MigrationResponse.class, description = "id of the image store from where the data is to be migrated", required = true) private Long id; @@ -65,7 +64,7 @@ public class MigrateSecondaryStorageDataCmd extends BaseAsyncCmd { @Parameter(name = ApiConstants.MIGRATE_TO, type = CommandType.LIST, collectionType = CommandType.UUID, - entityType = ImageStoreResponse.class, + entityType = MigrationResponse.class, description = "id of the destination secondary storage pool to which the 
templates are to be migrated to", required = true) private List migrateTo; @@ -81,7 +80,6 @@ public class MigrateSecondaryStorageDataCmd extends BaseAsyncCmd { /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// - public Long getId() { return id; } @@ -120,4 +118,4 @@ public String getCommandName() { public long getEntityOwnerId() { return CallContext.current().getCallingAccountId(); } -} +} \ No newline at end of file diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index 99952e54a523..374237c131ae 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -326,7 +326,9 @@ public int compare(DataObject o1, DataObject o2) { } if (parentSnapshot != null) { DataStore parentDS = dataStoreManager.getDataStore(parentSnapshot.getDataStore().getId(), DataStoreRole.Image); - futures.add(executor.submit(new MigrateDataTask(snapshotInfo, srcDatastore, parentDS))); + if (parentDS.getId() != snapshotInfo.getDataStore().getId()) { + futures.add(executor.submit(new MigrateDataTask(snapshotInfo, srcDatastore, parentDS))); + } } } } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java index 86d747ca3fa0..94dc54e0d3cf 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java @@ -71,7 +71,7 @@ public AsyncCallFuture migrateData(DataObject srcDataObject, D DataObjectResult res = new 
DataObjectResult(srcDataObject); DataObject destDataObject = null; try { - if (srcDataObject instanceof SnapshotInfo && snapshotChain.keySet().contains(srcDataObject)) { + if (srcDataObject instanceof SnapshotInfo && snapshotChain != null && snapshotChain.keySet().contains(srcDataObject)) { for (SnapshotInfo snapshotInfo : snapshotChain.get(srcDataObject).first()) { destDataObject = destDatastore.create(snapshotInfo); snapshotInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); @@ -102,14 +102,6 @@ public AsyncCallFuture migrateData(DataObject srcDataObject, D return future; } -// protected void migrateJob(AsyncCallFuture future, DataObject srcDataObject, DataObject destDataObject, DataStore destDatastore) throws ExecutionException, InterruptedException { -// MigrateDataContext context = new MigrateDataContext(null, future, srcDataObject, destDataObject, destDatastore); -// AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); -// caller.setCallback(caller.getTarget().migrateDataCallBack(null, null)).setContext(context); -// s_logger.debug(srcDataObject.getDataStore().getTO().toString()); -// motionSrv.copyAsync(srcDataObject, destDataObject, caller); -// } - /** * Callback function to handle state change of source and destination data objects based on the success or failure of the migrate task */ diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java index 976077daccb5..d1670e879fb9 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java @@ -33,6 +33,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ImageStoreProvider; import 
org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.image.ImageStoreDriver; @@ -42,6 +43,7 @@ import org.apache.log4j.Logger; import org.springframework.stereotype.Component; +import com.cloud.configuration.Config; import com.cloud.server.StatsCollector; import com.cloud.storage.ScopeType; import com.cloud.storage.dao.VMTemplateDao; @@ -57,6 +59,9 @@ public class ImageStoreProviderManagerImpl implements ImageStoreProviderManager DataStoreProviderManager providerManager; @Inject StatsCollector _statsCollector; + @Inject + ConfigurationDao configDao; + Map driverMaps; @PostConstruct @@ -120,12 +125,20 @@ public List listImageStoresByScope(ZoneScope scope) { @Override public List listImageStoresByScopeExcludingReadOnly(ZoneScope scope) { + String allocationAlgorithm = configDao.getValue(Config.ImageStoreAllocationAlgorithm.key()); + List stores = dataStoreDao.findByScopeExcludingReadOnly(scope); List imageStores = new ArrayList(); for (ImageStoreVO store : stores) { imageStores.add(getImageStore(store.getId())); } - return imageStores; + if (allocationAlgorithm.equals("random")) { + Collections.shuffle(imageStores); + return imageStores; + } else if (allocationAlgorithm.equals("firstfitleastconsumed")) { + return orderImageStoresOnFreeCapacity(imageStores); + } + return null; } @Override @@ -188,6 +201,31 @@ public int compare(DataStore store1, DataStore store2) { return null; } + @Override + public List orderImageStoresOnFreeCapacity(List imageStores) { + List stores = new ArrayList<>(); + if (imageStores.size() > 1) { + imageStores.sort(new Comparator() { // Sort data stores based on free capacity + @Override + public int compare(DataStore store1, DataStore 
store2) { + return Long.compare(_statsCollector.imageStoreCurrentFreeCapacity(store1), + _statsCollector.imageStoreCurrentFreeCapacity(store2)); + } + }); + for (DataStore imageStore : imageStores) { + // Return image store if used percentage is less then threshold value i.e. 90%. + if (_statsCollector.imageStoreHasEnoughCapacity(imageStore)) { + stores.add(imageStore); + } + } + } else if (imageStores.size() == 1) { + if (_statsCollector.imageStoreHasEnoughCapacity(imageStores.get(0))) { + stores.add(imageStores.get(0)); + } + } + return stores; + } + @Override public List listImageStoresWithFreeCapacity(List imageStores) { List stores = new ArrayList<>(); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java index bf491d9d7510..ff6c4fb5c6a7 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/DataStoreManagerImpl.java @@ -97,7 +97,6 @@ public DataStore getRandomUsableImageStore(long zoneId) { @Override public DataStore getImageStoreWithFreeCapacity(long zoneId) { - //List stores = getImageStoresByScope(new ZoneScope(zoneId)); List stores = getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); if (stores == null || stores.size() == 0) { return null; diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/datastore/ImageStoreProviderManager.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/datastore/ImageStoreProviderManager.java index 5d0432292aa9..7e2f720042ed 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/datastore/ImageStoreProviderManager.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/datastore/ImageStoreProviderManager.java @@ -78,4 +78,6 @@ public interface 
ImageStoreProviderManager { * @return the list of DataStore which have free capacity */ List listImageStoresWithFreeCapacity(List imageStores); + + List orderImageStoresOnFreeCapacity(List imageStores); } diff --git a/server/src/main/java/com/cloud/configuration/Config.java b/server/src/main/java/com/cloud/configuration/Config.java index 81495546fc43..769ff505aed7 100644 --- a/server/src/main/java/com/cloud/configuration/Config.java +++ b/server/src/main/java/com/cloud/configuration/Config.java @@ -905,6 +905,14 @@ public enum Config { "random", "'random', 'firstfit', 'userdispersing', 'userconcentratedpod_random', 'userconcentratedpod_firstfit', 'firstfitleastconsumed' : Order in which hosts within a cluster will be considered for VM/volume allocation.", null), + ImageStoreAllocationAlgorithm( + "Advanced", + ManagementServer.class, + String.class, + "image.store.allocation.algorithm", + "firstfitleastconsumed", + "firstfitleastconsumed','random' : Order in which hosts within a cluster will be considered for VM/volume allocation.", + null), VmDeploymentPlanner( "Advanced", ManagementServer.class, diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java index b74bb300a754..20cf36d4c236 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java @@ -40,6 +40,7 @@ import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; +import com.cloud.utils.db.Filter; import com.cloud.utils.db.JoinBuilder.JoinType; import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; @@ -156,7 
+157,8 @@ public Pair scanPool(Long pool) { Integer hostsCount = _hostDao.countAllByType(Host.Type.Routing); // Maximum number of allowed SSVMs for migration task Integer maxSsvms = (hostsCount < MaxNumberOfSsvmsForMigration.value()) ? hostsCount : MaxNumberOfSsvmsForMigration.value(); - + int numConcurrentCopyTasksPerSSVM = NumbersUtil.parseInt(_configDao.getValue(Config.SecStorageMaxMigrateSessions.key()), 2); + int halfLimit = (alreadyRunning.size() * numConcurrentCopyTasksPerSSVM / 2); currentTime = DateUtil.currentGMTTime().getTime(); if (alreadyRunning.size() * _capacityPerSSVM - activeCmds.size() < _standbyCapacity) { @@ -166,8 +168,9 @@ public Pair scanPool(Long pool) { } // Scale the number of SSVMs if the number of Copy operations is greater than the number of SSVMs running and the copy operation has been in pipeline for // more than half of the total time allocated for secondary storage operations - else if (!copyCmdsInPipeline.isEmpty() && copyCmdsInPipeline.size() >= alreadyRunning.size() && - (((currentTime - copyCmdsInPipeline.get(alreadyRunning.size() - 1).getCreated().getTime()) > _maxExecutionTimeMs/2 )) && + + else if (!copyCmdsInPipeline.isEmpty() && copyCmdsInPipeline.size() > halfLimit && + (((currentTime - copyCmdsInPipeline.get(halfLimit - 1).getCreated().getTime()) > _maxExecutionTimeMs/2 )) && (currentTime > nextSpawnTime) && alreadyRunning.size() <= maxSsvms) { nextSpawnTime = currentTime + _maxExecutionTimeMs/2; s_logger.debug("scaling SSVM"); @@ -175,7 +178,7 @@ else if (!copyCmdsInPipeline.isEmpty() && copyCmdsInPipeline.size() >= alreadyR } // Scale down the number of SSVMs if the load on them has reduced - if ((copyCmdsInPipeline.size() < alreadyRunning.size() && alreadyRunning.size() * _capacityPerSSVM - activeCmds.size() > _standbyCapacity) && alreadyRunning.size() > 1) { + if ((copyCmdsInPipeline.size() <= halfLimit && alreadyRunning.size() * _capacityPerSSVM - activeCmds.size() > _standbyCapacity) && alreadyRunning.size() > 1) { 
Collections.reverse(alreadyRunning); for(SecondaryStorageVmVO vm : alreadyRunning) { long count = copyCmdsInPipeline.stream().map(cmd -> cmd.getInstanceId() == vm.getId()).count(); @@ -203,7 +206,6 @@ public Pair assignSecStorageVm(long zoneId, Comman if (host != null && host.getStatus() == Status.Up) return new Pair(host, secStorageVm); } - return null; } @@ -222,8 +224,8 @@ private List findAllActiveCopyCommands(long dcId, Date cutTime sc.setParameters("command_name", "CopyCommand"); sc.setJoinParameters("hostSearch", "dc", dcId); sc.setJoinParameters("hostSearch", "status", Status.Up); - List result = _cmdExecLogDao.search(sc, null); - return result; + Filter filter = new Filter(CommandExecLogVO.class, "created", true, null, null); + return _cmdExecLogDao.search(sc, filter); } private boolean reserveStandbyCapacity() { From ed2fe4e1289c47ae4ac89c95c09fbee833334ecd Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Thu, 7 May 2020 10:18:07 +0530 Subject: [PATCH 06/40] Code changes and cleanup --- .../storage/MigrateSecondaryStorageDataCmd.java | 2 ++ .../engine/orchestration/StorageOrchestrator.java | 2 +- .../storage/image/StorageServiceImpl.java | 2 +- .../spring-engine-storage-image-core-context.xml | 2 -- .../storage/image/db/TemplateDataStoreDaoImpl.java | 4 ---- .../cloud/template/HypervisorTemplateAdapter.java | 2 -- .../PremiumSecondaryStorageManagerImpl.java | 14 +++++++------- 7 files changed, 11 insertions(+), 17 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java index ca38186ff289..0c7a2ff31242 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java @@ -35,11 +35,13 @@ import 
com.cloud.exception.NetworkRuleConflictException; import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; +import com.cloud.storage.ImageStore; import com.cloud.utils.StringUtils; @APICommand(name = MigrateSecondaryStorageDataCmd.APINAME, description = "migrates data objects from one secondary storage to destination image store(s)", responseObject = MigrationResponse.class, + entityType = {ImageStore.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.14.0", diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index 374237c131ae..d0fe0b39e297 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -318,7 +318,7 @@ public int compare(DataObject o1, DataObject o2) { if (parentSnapshot == null && migrationPolicy == MigrationPolicy.COMPLETE) { List dstores = sortDataStores(storageCapacities); Long storeId = dstores.get(0); - if (storeId == srcDataStoreId) { + if (storeId.equals(srcDataStoreId)) { storeId = dstores.get(1); } DataStore datastore = dataStoreManager.getDataStore(storeId, DataStoreRole.Image); diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java index 94dc54e0d3cf..2d3dc961e9e1 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java @@ -71,7 +71,7 @@ public AsyncCallFuture migrateData(DataObject srcDataObject, D DataObjectResult res = 
new DataObjectResult(srcDataObject); DataObject destDataObject = null; try { - if (srcDataObject instanceof SnapshotInfo && snapshotChain != null && snapshotChain.keySet().contains(srcDataObject)) { + if (srcDataObject instanceof SnapshotInfo && snapshotChain != null && snapshotChain.containsKey(srcDataObject)) { for (SnapshotInfo snapshotInfo : snapshotChain.get(srcDataObject).first()) { destDataObject = destDatastore.create(snapshotInfo); snapshotInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); diff --git a/engine/storage/image/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-image-core-context.xml b/engine/storage/image/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-image-core-context.xml index a280e13a8cfb..ea1aea914136 100644 --- a/engine/storage/image/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-image-core-context.xml +++ b/engine/storage/image/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-image-core-context.xml @@ -46,6 +46,4 @@ - - diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java index 7572131353cc..d77f922f4911 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java @@ -299,7 +299,6 @@ public List listByTemplateStoreDownloadStatus(long template @Override public List listByTemplateZoneDownloadStatus(long templateId, Long zoneId, Status... 
status) { // get all elgible image stores - //List imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); List imgStores = _storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); if (imgStores != null) { List result = new ArrayList(); @@ -327,7 +326,6 @@ public void removeByTemplateStore(long templateId, long imageStoreId) { @Override public TemplateDataStoreVO findByTemplateZoneDownloadStatus(long templateId, Long zoneId, Status... status) { // get all elgible image stores - //List imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); List imgStores = _storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); if (imgStores != null) { for (DataStore store : imgStores) { @@ -425,7 +423,6 @@ public TemplateDataStoreVO findByTemplateZone(long templateId, Long zoneId, Data // get all elgible image stores List imgStores = null; if (role == DataStoreRole.Image) { - //imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); imgStores = _storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); } else if (role == DataStoreRole.ImageCache) { imgStores = _storeMgr.getImageCacheStores(new ZoneScope(zoneId)); @@ -444,7 +441,6 @@ public TemplateDataStoreVO findByTemplateZone(long templateId, Long zoneId, Data @Override public TemplateDataStoreVO findByTemplateZoneReady(long templateId, Long zoneId) { List imgStores = null; - //imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); imgStores = _storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); if (imgStores != null) { Collections.shuffle(imgStores); diff --git a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java index f9b69fe5741e..80ca46912f24 100644 --- a/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java +++ b/server/src/main/java/com/cloud/template/HypervisorTemplateAdapter.java @@ -247,8 +247,6 @@ 
private void createTemplateWithinZone(Long zId, TemplateProfile profile, VMTempl } Set zoneSet = new HashSet(); - Collections.shuffle(imageStores); - // For private templates choose a random store. TODO - Have a better algorithm based on size, no. of objects, load etc. for (DataStore imageStore : imageStores) { // skip data stores for a disabled zone Long zoneId = imageStore.getScope().getScopeId(); diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java index 20cf36d4c236..04e300cd61eb 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java @@ -85,6 +85,8 @@ public boolean configure(String name, Map params) throws Configu _maxExecutionTimeMs = nMaxExecutionMinutes * 60 * 1000; nextSpawnTime = currentTime + _maxExecutionTimeMs; + migrateCapPerSSVM = NumbersUtil.parseInt(_configDao.getValue(Config.SecStorageMaxMigrateSessions.key()), DEFAULT_MIGRATE_SS_VM_CAPACITY); + hostSearch = _hostDao.createSearchBuilder(); hostSearch.and("dc", hostSearch.entity().getDataCenterId(), Op.EQ); hostSearch.and("status", hostSearch.entity().getStatus(), Op.EQ); @@ -151,14 +153,13 @@ public Pair scanPool(Long pool) { alreadyRunning = _secStorageVmDao.getSecStorageVmListInStates(null, dataCenterId, State.Running, State.Migrating, State.Starting); List activeCmds = findActiveCommands(dataCenterId, cutTime); - // Find running copy / migrate commands running + // Find running copy / migrate commands running arranged in ascending order of their creation time i.e., oldest first List copyCmdsInPipeline = findAllActiveCopyCommands(dataCenterId, cutTime); 
// Count of total hosts Integer hostsCount = _hostDao.countAllByType(Host.Type.Routing); // Maximum number of allowed SSVMs for migration task Integer maxSsvms = (hostsCount < MaxNumberOfSsvmsForMigration.value()) ? hostsCount : MaxNumberOfSsvmsForMigration.value(); - int numConcurrentCopyTasksPerSSVM = NumbersUtil.parseInt(_configDao.getValue(Config.SecStorageMaxMigrateSessions.key()), 2); - int halfLimit = (alreadyRunning.size() * numConcurrentCopyTasksPerSSVM / 2); + int halfLimit = Math.round((float) (alreadyRunning.size() * migrateCapPerSSVM) / 2); currentTime = DateUtil.currentGMTTime().getTime(); if (alreadyRunning.size() * _capacityPerSSVM - activeCmds.size() < _standbyCapacity) { @@ -167,9 +168,8 @@ public Pair scanPool(Long pool) { return new Pair(AfterScanAction.expand, SecondaryStorageVm.Role.commandExecutor); } // Scale the number of SSVMs if the number of Copy operations is greater than the number of SSVMs running and the copy operation has been in pipeline for - // more than half of the total time allocated for secondary storage operations - - else if (!copyCmdsInPipeline.isEmpty() && copyCmdsInPipeline.size() > halfLimit && + // more than half of the total time allocated for secondary storage operation + else if (!copyCmdsInPipeline.isEmpty() && copyCmdsInPipeline.size() >= halfLimit && (((currentTime - copyCmdsInPipeline.get(halfLimit - 1).getCreated().getTime()) > _maxExecutionTimeMs/2 )) && (currentTime > nextSpawnTime) && alreadyRunning.size() <= maxSsvms) { nextSpawnTime = currentTime + _maxExecutionTimeMs/2; @@ -178,7 +178,7 @@ else if (!copyCmdsInPipeline.isEmpty() && copyCmdsInPipeline.size() > halfLimit } // Scale down the number of SSVMs if the load on them has reduced - if ((copyCmdsInPipeline.size() <= halfLimit && alreadyRunning.size() * _capacityPerSSVM - activeCmds.size() > _standbyCapacity) && alreadyRunning.size() > 1) { + if ((copyCmdsInPipeline.size() < halfLimit && alreadyRunning.size() * _capacityPerSSVM - activeCmds.size() > 
_standbyCapacity) && alreadyRunning.size() > 1) { Collections.reverse(alreadyRunning); for(SecondaryStorageVmVO vm : alreadyRunning) { long count = copyCmdsInPipeline.stream().map(cmd -> cmd.getInstanceId() == vm.getId()).count(); From 082c7c73853b95ec40846aaf7b0f1229152740d5 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 8 May 2020 10:48:27 +0530 Subject: [PATCH 07/40] Refactored code --- .../api/response/MigrationResponse.java | 17 +++++++ .../image/BaseImageStoreDriverImpl.java | 48 +++++++++---------- 2 files changed, 40 insertions(+), 25 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/response/MigrationResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/MigrationResponse.java index 0c342ab18e5f..eef959be74dd 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/MigrationResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/MigrationResponse.java @@ -18,12 +18,29 @@ package org.apache.cloudstack.api.response; import org.apache.cloudstack.api.BaseResponse; +import org.apache.cloudstack.api.EntityReference; +import com.cloud.serializer.Param; +import com.cloud.storage.ImageStore; +import com.google.gson.annotations.SerializedName; + +@EntityReference(value = ImageStore.class) public class MigrationResponse extends BaseResponse { + @SerializedName("message") + @Param(description = "Response message") private String message; + + @SerializedName("migrationtype") + @Param(description = "Type of migration requested for") private String migrationType; + + @SerializedName("success") + @Param(description = "true if operation is executed successfully") private boolean success; + MigrationResponse() { + } + public MigrationResponse(String message, String migrationType, boolean success) { this.message = message; this.migrationType = migrationType; diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java 
b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java index c22daceaffc1..820c4bdbc4bb 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java @@ -24,9 +24,11 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; +import java.util.Collections; import java.util.Date; import java.util.List; import java.util.Map; +import java.util.stream.Collectors; import javax.inject.Inject; @@ -373,32 +375,26 @@ public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCa s_logger.error(errMsg); answer = new Answer(cmd, false, errMsg); } else { - boolean sent = false; - // Find the first endpoint to which the command can be sent to - for (EndPoint ep : eps) { - if (getCopyCmdsCountToSpecificSSVM(ep.getId()) >= maxConcurrentCopyOpsPerSSVM) { - continue; + // select endpoint with least number of commands running on them + EndPoint endPoint = null; + Long epId = ssvmWithLeastMigrateJobs(); + if (epId == null) { + Collections.shuffle(eps); + endPoint = eps.get(0); + } else { + List remainingEps = eps.stream().filter(ep -> ep.getId() != epId ).collect(Collectors.toList()); + if (!remainingEps.isEmpty()) { + Collections.shuffle(remainingEps); + endPoint = remainingEps.get(0); + } else { + endPoint = _defaultEpSelector.getEndPointFromHostId(epId); } - - CommandExecLogVO execLog = new CommandExecLogVO(ep.getId(), _secStorageVmDao.findByInstanceName(hostDao.findById(ep.getId()).getName()).getId(), cmd.getClass().getSimpleName(), 1); - Long cmdExecId = _cmdExecLogDao.persist(execLog).getId(); - answer = ep.sendMessage(cmd); - answer.setContextParam("cmd", cmdExecId.toString()); - sent = true; - break; - } - // If both SSVMs are pre-occupied with tasks, choose the SSVM with least migrate jobs - if (!sent) { - // Picking endpoint with least number 
of copy commands running on it - Long epId = ssvmWithLeastMigrateJobs(); - EndPoint endPoint = _defaultEpSelector.getEndPointFromHostId(epId); - CommandExecLogVO execLog = new CommandExecLogVO(epId, _secStorageVmDao.findByInstanceName(hostDao.findById(epId).getName()).getId(), cmd.getClass().getSimpleName(), 1); - Long cmdExecId = _cmdExecLogDao.persist(execLog).getId(); - answer = endPoint.sendMessage(cmd); - answer.setContextParam("cmd", cmdExecId.toString()); } + CommandExecLogVO execLog = new CommandExecLogVO(endPoint.getId(), _secStorageVmDao.findByInstanceName(hostDao.findById(endPoint.getId()).getName()).getId(), cmd.getClass().getSimpleName(), 1); + Long cmdExecId = _cmdExecLogDao.persist(execLog).getId(); + answer = endPoint.sendMessage(cmd); + answer.setContextParam("cmd", cmdExecId.toString()); } - CopyCommandResult result = new CopyCommandResult("", answer); callback.complete(result); } @@ -486,8 +482,10 @@ private Long ssvmWithLeastMigrateJobs() { try { pstmt = txn.prepareAutoCloseStatement(query); ResultSet rs = pstmt.executeQuery(); - rs.absolute(1); - epId = (long) rs.getInt(1); + if (rs.getFetchSize() > 0) { + rs.absolute(1); + epId = (long) rs.getInt(1); + } } catch (SQLException e) { s_logger.debug("SQLException caught", e); } From 3ba480fee5fcd8ef54b9c53662a5e8003560d0c8 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 8 May 2020 14:07:40 +0530 Subject: [PATCH 08/40] Added check for templates already present on destination + code refactoring --- .../MigrateSecondaryStorageDataCmd.java | 5 ++- .../storage/image/StorageServiceImpl.java | 37 ++++++++++++---- .../image/BaseImageStoreDriverImpl.java | 43 +++++++++++-------- .../image/db/TemplateDataStoreDaoImpl.java | 8 ++-- 4 files changed, 59 insertions(+), 34 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java 
index 0c7a2ff31242..3a9c68ad98d1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java @@ -25,6 +25,7 @@ import org.apache.cloudstack.api.BaseAsyncCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.response.ImageStoreResponse; import org.apache.cloudstack.api.response.MigrationResponse; import org.apache.cloudstack.context.CallContext; import org.apache.log4j.Logger; @@ -58,7 +59,7 @@ public class MigrateSecondaryStorageDataCmd extends BaseAsyncCmd { @Parameter(name = ApiConstants.FROM, type = CommandType.UUID, - entityType = MigrationResponse.class, + entityType = ImageStoreResponse.class, description = "id of the image store from where the data is to be migrated", required = true) private Long id; @@ -66,7 +67,7 @@ public class MigrateSecondaryStorageDataCmd extends BaseAsyncCmd { @Parameter(name = ApiConstants.MIGRATE_TO, type = CommandType.LIST, collectionType = CommandType.UUID, - entityType = MigrationResponse.class, + entityType = ImageStoreResponse.class, description = "id of the destination secondary storage pool to which the templates are to be migrated to", required = true) private List migrateTo; diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java index 2d3dc961e9e1..553223d3c234 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java @@ -30,11 +30,14 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import 
org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageService; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.async.AsyncRpcContext; import org.apache.cloudstack.storage.command.CopyCmdAnswer; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.log4j.Logger; import com.cloud.secstorage.CommandExecLogDao; @@ -48,6 +51,8 @@ public class StorageServiceImpl implements SecondaryStorageService { DataMotionService motionSrv; @Inject CommandExecLogDao _cmdExecLogDao; + @Inject + TemplateDataStoreDao templateStoreDao; private class MigrateDataContext extends AsyncRpcContext { final DataObject srcData; @@ -76,32 +81,46 @@ public AsyncCallFuture migrateData(DataObject srcDataObject, D destDataObject = destDatastore.create(snapshotInfo); snapshotInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); destDataObject.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); - MigrateDataContext context = new MigrateDataContext(null, future, snapshotInfo, destDataObject, destDatastore); - AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); - caller.setCallback(caller.getTarget().migrateDataCallBack(null, null)).setContext(context); - motionSrv.copyAsync(snapshotInfo, destDataObject, caller); + migrateJob(future, snapshotInfo, destDataObject, destDatastore); } } else { + // Check if template in destination store, if yes, do not proceed + if (srcDataObject instanceof TemplateInfo) { + s_logger.debug("Checking if template present at destination"); + TemplateDataStoreVO 
templateStoreVO = templateStoreDao.findByStoreTemplate(destDatastore.getId(), srcDataObject.getId()); + if (templateStoreVO != null) { + String msg = "Template already exists in destination store"; + s_logger.debug(msg); + res.setResult(msg); + res.setSuccess(true); + future.complete(res); + return future; + } + } destDataObject = destDatastore.create(srcDataObject); srcDataObject.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); destDataObject.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); - MigrateDataContext context = new MigrateDataContext(null, future, srcDataObject, destDataObject, destDatastore); - AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); - caller.setCallback(caller.getTarget().migrateDataCallBack(null, null)).setContext(context); - motionSrv.copyAsync(srcDataObject, destDataObject, caller); + migrateJob(future, srcDataObject, destDataObject, destDatastore); } } catch (Exception e) { s_logger.debug("Failed to copy Data", e); if (destDataObject != null) { destDataObject.getDataStore().delete(destDataObject); - srcDataObject.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); } + srcDataObject.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); res.setResult(e.toString()); future.complete(res); } return future; } + protected void migrateJob(AsyncCallFuture future, DataObject srcDataObject, DataObject destDataObject, DataStore destDatastore) throws ExecutionException, InterruptedException { + MigrateDataContext context = new MigrateDataContext(null, future, srcDataObject, destDataObject, destDatastore); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + caller.setCallback(caller.getTarget().migrateDataCallBack(null, null)).setContext(context); + motionSrv.copyAsync(srcDataObject, destDataObject, caller); + } + /** * Callback function to handle state change of source and destination data objects based on the success or failure of the 
migrate task */ diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java index 820c4bdbc4bb..fc2558495ecd 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java @@ -364,7 +364,6 @@ public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCa (srcdata.getType() == DataObjectType.VOLUME && destData.getType() == DataObjectType.VOLUME)) { int nMaxExecutionMinutes = NumbersUtil.parseInt(configDao.getValue(Config.SecStorageCmdExecutionTimeMax.key()), 30); - int maxConcurrentCopyOpsPerSSVM = NumbersUtil.parseInt(configDao.getValue(Config.SecStorageMaxMigrateSessions.key()), 2); CopyCommand cmd = new CopyCommand(srcdata.getTO(), destData.getTO(), nMaxExecutionMinutes * 60 * 1000, true); Answer answer = null; @@ -376,30 +375,36 @@ public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCa answer = new Answer(cmd, false, errMsg); } else { // select endpoint with least number of commands running on them - EndPoint endPoint = null; - Long epId = ssvmWithLeastMigrateJobs(); - if (epId == null) { - Collections.shuffle(eps); - endPoint = eps.get(0); - } else { - List remainingEps = eps.stream().filter(ep -> ep.getId() != epId ).collect(Collectors.toList()); - if (!remainingEps.isEmpty()) { - Collections.shuffle(remainingEps); - endPoint = remainingEps.get(0); - } else { - endPoint = _defaultEpSelector.getEndPointFromHostId(epId); - } - } - CommandExecLogVO execLog = new CommandExecLogVO(endPoint.getId(), _secStorageVmDao.findByInstanceName(hostDao.findById(endPoint.getId()).getName()).getId(), cmd.getClass().getSimpleName(), 1); - Long cmdExecId = _cmdExecLogDao.persist(execLog).getId(); - answer = endPoint.sendMessage(cmd); - 
answer.setContextParam("cmd", cmdExecId.toString()); + answer = sendToLeastBusyEndpoint(eps, cmd); } CopyCommandResult result = new CopyCommandResult("", answer); callback.complete(result); } } + private Answer sendToLeastBusyEndpoint(List eps, CopyCommand cmd) { + Answer answer = null; + EndPoint endPoint = null; + Long epId = ssvmWithLeastMigrateJobs(); + if (epId == null) { + Collections.shuffle(eps); + endPoint = eps.get(0); + } else { + List remainingEps = eps.stream().filter(ep -> ep.getId() != epId ).collect(Collectors.toList()); + if (!remainingEps.isEmpty()) { + Collections.shuffle(remainingEps); + endPoint = remainingEps.get(0); + } else { + endPoint = _defaultEpSelector.getEndPointFromHostId(epId); + } + } + CommandExecLogVO execLog = new CommandExecLogVO(endPoint.getId(), _secStorageVmDao.findByInstanceName(hostDao.findById(endPoint.getId()).getName()).getId(), cmd.getClass().getSimpleName(), 1); + Long cmdExecId = _cmdExecLogDao.persist(execLog).getId(); + answer = endPoint.sendMessage(cmd); + answer.setContextParam("cmd", cmdExecId.toString()); + return answer; + } + @Override public boolean canCopy(DataObject srcData, DataObject destData) { if (srcData.getDataStore().getTO() instanceof NfsTO && destData.getDataStore().getTO() instanceof NfsTO) { diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java index d77f922f4911..2372e8444cc5 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java @@ -299,7 +299,7 @@ public List listByTemplateStoreDownloadStatus(long template @Override public List listByTemplateZoneDownloadStatus(long templateId, Long zoneId, Status... 
status) { // get all elgible image stores - List imgStores = _storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); + List imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); if (imgStores != null) { List result = new ArrayList(); for (DataStore store : imgStores) { @@ -326,7 +326,7 @@ public void removeByTemplateStore(long templateId, long imageStoreId) { @Override public TemplateDataStoreVO findByTemplateZoneDownloadStatus(long templateId, Long zoneId, Status... status) { // get all elgible image stores - List imgStores = _storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); + List imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); if (imgStores != null) { for (DataStore store : imgStores) { List sRes = listByTemplateStoreDownloadStatus(templateId, store.getId(), status); @@ -423,7 +423,7 @@ public TemplateDataStoreVO findByTemplateZone(long templateId, Long zoneId, Data // get all elgible image stores List imgStores = null; if (role == DataStoreRole.Image) { - imgStores = _storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); + imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); } else if (role == DataStoreRole.ImageCache) { imgStores = _storeMgr.getImageCacheStores(new ZoneScope(zoneId)); } @@ -441,7 +441,7 @@ public TemplateDataStoreVO findByTemplateZone(long templateId, Long zoneId, Data @Override public TemplateDataStoreVO findByTemplateZoneReady(long templateId, Long zoneId) { List imgStores = null; - imgStores = _storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(zoneId)); + imgStores = _storeMgr.getImageStoresByScope(new ZoneScope(zoneId)); if (imgStores != null) { Collections.shuffle(imgStores); for (DataStore store : imgStores) { From b173f788da86bb34ea4d2f177b8a5cb8fe13326f Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 11 May 2020 11:55:09 +0530 Subject: [PATCH 09/40] Address review comments + make complete migration default --- 
.../apache/cloudstack/api/ApiConstants.java | 3 +- .../org/apache/cloudstack/api/BaseCmd.java | 2 +- .../MigrateSecondaryStorageDataCmd.java | 19 +-- .../admin/storage/UpdateImageStoreCmd.java | 10 +- .../api/response/MigrationResponse.java | 2 +- .../storage/ImageStoreService.java | 2 +- .../service/StorageOrchestrationService.java | 3 +- .../ObjectInDataStoreStateMachine.java | 1 - .../orchestration/StorageOrchestrator.java | 110 +++++++++++------- .../orchestration/VolumeOrchestrator.java | 1 + .../secstorage/CommandExecLogDaoImpl.java | 2 - .../storage/datastore/db/ImageStoreDao.java | 4 +- .../datastore/db/ImageStoreDaoImpl.java | 19 +-- .../ImageStoreProviderManagerImpl.java | 4 +- .../metrics/PrometheusExporterImpl.java | 2 +- .../ConfigurationManagerImpl.java | 2 +- .../cloud/storage/ImageStoreServiceImpl.java | 16 ++- .../ConfigurationManagerTest.java | 16 +-- .../PremiumSecondaryStorageManagerImpl.java | 6 +- 19 files changed, 112 insertions(+), 112 deletions(-) rename api/src/main/java/{com/cloud => org/apache/cloudstack}/storage/ImageStoreService.java (96%) diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 1c2cab71247c..b294e5b2e505 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -780,7 +780,8 @@ public class ApiConstants { public static final String EXITCODE = "exitcode"; public static final String TARGET_ID = "targetid"; public static final String FILES = "files"; - public static final String FROM = "from"; + public static final String SRC_POOL = "srcpool"; + public static final String DEST_POOLS = "destpools"; public static final String VOLUME_IDS = "volumeids"; public static final String ROUTER_ID = "routerid"; diff --git a/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java b/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java index 
ad173a79ce8c..2afa2625ce34 100644 --- a/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java @@ -39,6 +39,7 @@ import org.apache.cloudstack.network.lb.ApplicationLoadBalancerService; import org.apache.cloudstack.network.lb.InternalLoadBalancerVMService; import org.apache.cloudstack.query.QueryService; +import org.apache.cloudstack.storage.ImageStoreService; import org.apache.cloudstack.usage.UsageService; import org.apache.log4j.Logger; @@ -69,7 +70,6 @@ import com.cloud.server.ResourceMetaDataService; import com.cloud.server.TaggedResourceService; import com.cloud.storage.DataStoreProviderApiService; -import com.cloud.storage.ImageStoreService; import com.cloud.storage.StorageService; import com.cloud.storage.VolumeApiService; import com.cloud.storage.snapshot.SnapshotApiService; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java index 3a9c68ad98d1..6e6930c44fd4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java @@ -24,25 +24,17 @@ import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseAsyncCmd; import org.apache.cloudstack.api.Parameter; -import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.ImageStoreResponse; import org.apache.cloudstack.api.response.MigrationResponse; import org.apache.cloudstack.context.CallContext; import org.apache.log4j.Logger; import com.cloud.event.EventTypes; -import com.cloud.exception.ConcurrentOperationException; -import com.cloud.exception.InsufficientCapacityException; -import com.cloud.exception.NetworkRuleConflictException; -import 
com.cloud.exception.ResourceAllocationException; -import com.cloud.exception.ResourceUnavailableException; -import com.cloud.storage.ImageStore; import com.cloud.utils.StringUtils; @APICommand(name = MigrateSecondaryStorageDataCmd.APINAME, description = "migrates data objects from one secondary storage to destination image store(s)", responseObject = MigrationResponse.class, - entityType = {ImageStore.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.14.0", @@ -57,26 +49,25 @@ public class MigrateSecondaryStorageDataCmd extends BaseAsyncCmd { //////////////// API parameters ///////////////////// ///////////////////////////////////////////////////// - @Parameter(name = ApiConstants.FROM, + @Parameter(name = ApiConstants.SRC_POOL, type = CommandType.UUID, entityType = ImageStoreResponse.class, description = "id of the image store from where the data is to be migrated", required = true) private Long id; - @Parameter(name = ApiConstants.MIGRATE_TO, + @Parameter(name = ApiConstants.DEST_POOLS, type = CommandType.LIST, collectionType = CommandType.UUID, entityType = ImageStoreResponse.class, - description = "id of the destination secondary storage pool to which the templates are to be migrated to", + description = "id(s) of the destination secondary storage pool(s) to which the templates are to be migrated", required = true) private List migrateTo; @Parameter(name = ApiConstants.MIGRATION_TYPE, type = CommandType.STRING, description = "Balance: if you want data to be distributed evenly among the destination stores, " + - "Complete: If you want to migrate the entire data from source image store to the destination store(s)", - required = true) + "Complete: If you want to migrate the entire data from source image store to the destination store(s). 
Default: Complete") private String migrationType; ///////////////////////////////////////////////////// @@ -106,7 +97,7 @@ public String getEventDescription() { } @Override - public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + public void execute() { MigrationResponse response = _imageStoreService.migrateData(this); response.setObjectName("imagestore"); this.setResponseObject(response); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java index d3ae9a91d1b6..f7ff0d245b40 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java @@ -27,11 +27,6 @@ import org.apache.cloudstack.context.CallContext; import org.apache.log4j.Logger; -import com.cloud.exception.ConcurrentOperationException; -import com.cloud.exception.InsufficientCapacityException; -import com.cloud.exception.NetworkRuleConflictException; -import com.cloud.exception.ResourceAllocationException; -import com.cloud.exception.ResourceUnavailableException; import com.cloud.storage.ImageStore; @APICommand(name = UpdateImageStoreCmd.APINAME, description = "Updates image store read-only status", responseObject = ImageStoreResponse.class, entityType = {ImageStore.class}, @@ -46,7 +41,8 @@ public class UpdateImageStoreCmd extends BaseCmd { @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = ImageStoreResponse.class, required = true, description = "Image Store UUID") private Long id; - @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = true, description = "If set to true, it designates the corresponding image store to read-only") + 
@Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, required = true, description = "If set to true, it designates the corresponding image store to read-only, " + + "hence not considering them during storage migration") private Boolean readonly; ///////////////////////////////////////////////////// @@ -66,7 +62,7 @@ public Boolean getReadonly() { ///////////////////////////////////////////////////// @Override - public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + public void execute() { ImageStore result = _storageService.updateImageStoreStatus(getId(), getReadonly()); ImageStoreResponse storeResponse = null; if (result != null) { diff --git a/api/src/main/java/org/apache/cloudstack/api/response/MigrationResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/MigrationResponse.java index eef959be74dd..c67b1d2d13ee 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/MigrationResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/MigrationResponse.java @@ -27,7 +27,7 @@ @EntityReference(value = ImageStore.class) public class MigrationResponse extends BaseResponse { @SerializedName("message") - @Param(description = "Response message") + @Param(description = "Response message from migration of secondary storage data objects") private String message; @SerializedName("migrationtype") diff --git a/api/src/main/java/com/cloud/storage/ImageStoreService.java b/api/src/main/java/org/apache/cloudstack/storage/ImageStoreService.java similarity index 96% rename from api/src/main/java/com/cloud/storage/ImageStoreService.java rename to api/src/main/java/org/apache/cloudstack/storage/ImageStoreService.java index 1189751d6c03..b8f14ad2bfaf 100644 --- a/api/src/main/java/com/cloud/storage/ImageStoreService.java +++ 
b/api/src/main/java/org/apache/cloudstack/storage/ImageStoreService.java @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. -package com.cloud.storage; +package org.apache.cloudstack.storage; import org.apache.cloudstack.api.command.admin.storage.MigrateSecondaryStorageDataCmd; import org.apache.cloudstack.api.response.MigrationResponse; diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java index 1227c6bb7a5b..7bf845d3ec56 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/StorageOrchestrationService.java @@ -20,8 +20,7 @@ import java.util.List; import org.apache.cloudstack.api.response.MigrationResponse; - -import com.cloud.storage.ImageStoreService.MigrationPolicy; +import org.apache.cloudstack.storage.ImageStoreService.MigrationPolicy; public interface StorageOrchestrationService { MigrationResponse migrateData(Long srcDataStoreId, List destDatastores, MigrationPolicy migrationPolicy); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java index ae547eb32699..3e6134f17026 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java @@ -50,7 +50,6 @@ enum Event { DestroyRequested, OperationSuccessed, OperationFailed, - // Added as volume converts migrationrequested to copyrequested - VolumeObject.java CopyRequested, CopyingRequested, 
MigrationRequested, diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index d0fe0b39e297..2ffe58f2a501 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -55,6 +55,7 @@ import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.framework.jobs.AsyncJobManager; +import org.apache.cloudstack.storage.ImageStoreService.MigrationPolicy; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; @@ -72,7 +73,6 @@ import com.cloud.host.dao.HostDao; import com.cloud.server.StatsCollector; import com.cloud.storage.DataStoreRole; -import com.cloud.storage.ImageStoreService.MigrationPolicy; import com.cloud.storage.SnapshotVO; import com.cloud.storage.StorageService; import com.cloud.storage.StorageStats; @@ -173,37 +173,14 @@ public boolean configure(String name, Map params) throws Configu @Override public MigrationResponse migrateData(Long srcDataStoreId, List destDatastores, MigrationPolicy migrationPolicy) { List files = new LinkedList<>(); - int successCount = 0; boolean success = true; String message = null; - if (migrationPolicy == MigrationPolicy.COMPLETE) { - if (!filesReady(srcDataStoreId)) { - throw new CloudRuntimeException("Complete migration failed as there are data objects which are not Ready"); - } - } + checkIfCompleteMigrationPossible(migrationPolicy, srcDataStoreId); DataStore srcDatastore = dataStoreManager.getDataStore(srcDataStoreId, DataStoreRole.Image); Map, Long>> 
snapshotChains = new HashMap<>(); - files.addAll(getAllValidTemplates(srcDatastore)); - files.addAll(getAllValidSnapshotChains(srcDatastore, snapshotChains)); - files.addAll(getAllValidVolumes(srcDatastore)); - - Collections.sort(files, new Comparator() { - @Override - public int compare(DataObject o1, DataObject o2) { - Long size1 = o1.getSize(); - Long size2 = o2.getSize(); - if (o1 instanceof SnapshotInfo) { - size1 = snapshotChains.get(o1).second(); - } - if (o2 instanceof SnapshotInfo) { - size2 = snapshotChains.get(o2).second(); - } - //return o2.getSize() > o1.getSize() ? 1 : -1; - return size2 > size1 ? 1 : -1; - } - }); + files = getSortedValidSourcesList(srcDatastore, snapshotChains); if (files.isEmpty()) { return new MigrationResponse("No files in Image store "+srcDatastore.getId()+ " to migrate", migrationPolicy.toString(), true); @@ -232,7 +209,7 @@ public int compare(DataObject o1, DataObject o2) { ThreadPoolExecutor executor = new ThreadPoolExecutor(numConcurrentCopyTasksPerSSVM , numConcurrentCopyTasksPerSSVM, 30, TimeUnit.MINUTES, new MigrateBlockingQueue<>(numConcurrentCopyTasksPerSSVM)); Date start = new Date(); - if (meanstddev < threshold) { + if (meanstddev < threshold && migrationPolicy == MigrationPolicy.BALANCE) { s_logger.debug("mean std deviation of the image stores is below threshold, no migration required"); response = new MigrationResponse("Migration not required as system seems balanced", migrationPolicy.toString(), true); return response; @@ -250,7 +227,6 @@ public int compare(DataObject o1, DataObject o2) { storageCapacities = getStorageCapacities(storageCapacities); List orderedDS = sortDataStores(storageCapacities); Long destDatastoreId = orderedDS.get(0); - // If there aren't anymore files available for migration or no valid Image stores available for migration // end the migration process if (chosenFileForMigration == null || destDatastoreId == null || destDatastoreId == srcDatastore.getId()) { @@ -308,6 +284,32 @@ public int 
compare(DataObject o1, DataObject o2) { } Date end = new Date(); // Migrate snapshots created during the migration process + handleSnapshotMigration(srcDataStoreId, start, end, migrationPolicy, futures, storageCapacities, executor); + return handleResponse(futures, migrationPolicy, message, success); + } + + + + private MigrationResponse handleResponse(List>> futures, MigrationPolicy migrationPolicy, String message, boolean success) { + int successCount = 0; + for (Future> future : futures) { + try { + AsyncCallFuture res = future.get(); + if (res.get().isSuccess()) { + successCount++; + } + } catch ( InterruptedException | ExecutionException e) { + s_logger.warn("Failed to get result"); + continue; + } + } + message += ". successful migrations: "+successCount; + return new MigrationResponse(message, migrationPolicy.toString(), success); + } + + private void handleSnapshotMigration(Long srcDataStoreId, Date start, Date end, MigrationPolicy policy, + List>> futures, Map> storageCapacities, ThreadPoolExecutor executor) { + DataStore srcDatastore = dataStoreManager.getDataStore(srcDataStoreId, DataStoreRole.Image); List snaps = snapshotDataStoreDao.findSnapshots(srcDataStoreId, start, end); if (!snaps.isEmpty()) { for (SnapshotDataStoreVO snap : snaps) { @@ -315,7 +317,7 @@ public int compare(DataObject o1, DataObject o2) { SnapshotInfo snapshotInfo = snapshotFactory.getSnapshot(snapshotVO.getSnapshotId(), DataStoreRole.Image); SnapshotInfo parentSnapshot = snapshotInfo.getParent(); - if (parentSnapshot == null && migrationPolicy == MigrationPolicy.COMPLETE) { + if (parentSnapshot == null && policy == MigrationPolicy.COMPLETE) { List dstores = sortDataStores(storageCapacities); Long storeId = dstores.get(0); if (storeId.equals(srcDataStoreId)) { @@ -332,20 +334,6 @@ public int compare(DataObject o1, DataObject o2) { } } } - - for (Future> future : futures) { - try { - AsyncCallFuture res = future.get(); - if (res.get().isSuccess()) { - successCount++; - } - } catch ( 
InterruptedException | ExecutionException e) { - s_logger.warn("Failed to get result"); - continue; - } - } - message += ". successful migrations: "+successCount; - return new MigrationResponse(message, migrationPolicy.toString(), success); } private Map> getStorageCapacities(Map> storageCapacities) { @@ -465,6 +453,7 @@ private boolean shouldMigrate(DataObject chosenFile, Long srcDatastoreId, Long d if (meanStdDevCurrent > threshold && storageCapacityBelowThreshold(storageCapacities, destDatastoreId)) { return true; } + return true; } else { if (storageCapacityBelowThreshold(storageCapacities, destDatastoreId)) { return true; @@ -513,6 +502,39 @@ private boolean filesReady(Long srcDataStoreId) { return isReady; } + private void checkIfCompleteMigrationPossible(MigrationPolicy policy, Long srcDataStoreId) { + if (policy == MigrationPolicy.COMPLETE) { + if (!filesReady(srcDataStoreId)) { + throw new CloudRuntimeException("Complete migration failed as there are data objects which are not Ready"); + } + } + return; + } + + private List getSortedValidSourcesList(DataStore srcDataStore, Map, Long>> snapshotChains) { + List files = new ArrayList<>(); + files.addAll(getAllValidTemplates(srcDataStore)); + files.addAll(getAllValidSnapshotsAndChains(srcDataStore, snapshotChains)); + files.addAll(getAllValidVolumes(srcDataStore)); + + Collections.sort(files, new Comparator() { + @Override + public int compare(DataObject o1, DataObject o2) { + Long size1 = o1.getSize(); + Long size2 = o2.getSize(); + if (o1 instanceof SnapshotInfo) { + size1 = snapshotChains.get(o1).second(); + } + if (o2 instanceof SnapshotInfo) { + size2 = snapshotChains.get(o2).second(); + } + return size2 > size1 ? 
1 : -1; + } + }); + + return files; + } + // Gets list of all valid templates, i.e, templates in "Ready" state for migration private List getAllValidTemplates(DataStore srcDataStore) { @@ -531,7 +553,7 @@ private List getAllValidTemplates(DataStore srcDataStore) { * for each parent snapshot and the cumulative size of the chain - this is done to ensure that all the snapshots in a chain * are migrated to the same datastore */ - private List getAllValidSnapshotChains(DataStore srcDataStore, Map, Long>> snapshotChains) { + private List getAllValidSnapshotsAndChains(DataStore srcDataStore, Map, Long>> snapshotChains) { List files = new LinkedList<>(); List snapshots = snapshotDataStoreDao.listByStoreId(srcDataStore.getId(), DataStoreRole.Image); for (SnapshotDataStoreVO snapshot : snapshots) { diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 6a0a40f24e19..9e2168e0bfd0 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -141,6 +141,7 @@ import com.cloud.vm.dao.UserVmDao; public class VolumeOrchestrator extends ManagerBase implements VolumeOrchestrationService, Configurable { + public enum UserVmCloneType { full, linked } diff --git a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java index 048064e142cd..f89a1bbf4ccb 100644 --- a/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/secstorage/CommandExecLogDaoImpl.java @@ -57,6 +57,4 @@ public Integer getCopyCmdCountForSSVM(Long id) { List copyCmds = customSearch(sc, null); return copyCmds.size(); } - - } diff 
--git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java index 9ade4f9d0881..84cba70e8617 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDao.java @@ -29,9 +29,7 @@ public interface ImageStoreDao extends GenericDao { List findByProvider(String provider); - List findByScope(ZoneScope scope); - - List findByScopeExcludingReadOnly(ZoneScope scope); + List findByZone(ZoneScope scope, Boolean readonly); List findRegionImageStores(); diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java index 5b73ec2a5e19..0350b2907473 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java @@ -77,7 +77,7 @@ public List findByProvider(String provider) { } @Override - public List findByScope(ZoneScope scope) { + public List findByZone(ZoneScope scope, Boolean readonly) { SearchCriteria sc = createSearchCriteria(); sc.addAnd("role", SearchCriteria.Op.EQ, DataStoreRole.Image); if (scope.getScopeId() != null) { @@ -85,26 +85,15 @@ public List findByScope(ZoneScope scope) { scc.addOr("scope", SearchCriteria.Op.EQ, ScopeType.REGION); scc.addOr("dcId", SearchCriteria.Op.EQ, scope.getScopeId()); sc.addAnd("scope", SearchCriteria.Op.SC, scc); + if (readonly != null) { + sc.addAnd("readonly", SearchCriteria.Op.EQ, readonly); + } } // we should return all image stores if cross-zone scope is passed // (scopeId = null) return listBy(sc); } - @Override - public List findByScopeExcludingReadOnly(ZoneScope scope) { - SearchCriteria sc = 
createSearchCriteria(); - sc.addAnd("role", SearchCriteria.Op.EQ, DataStoreRole.Image); - if (scope.getScopeId() != null) { - SearchCriteria scc = createSearchCriteria(); - scc.addOr("scope", SearchCriteria.Op.EQ, ScopeType.REGION); - scc.addOr("dcId", SearchCriteria.Op.EQ, scope.getScopeId()); - sc.addAnd("scope", SearchCriteria.Op.SC, scc); - sc.addAnd("readonly", SearchCriteria.Op.EQ, Boolean.FALSE); - } - return listBy(sc); - } - @Override public List findRegionImageStores() { SearchCriteria sc = regionSearch.create(); diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java index d1670e879fb9..38477908db08 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java @@ -115,7 +115,7 @@ public List listImageCacheStores() { @Override public List listImageStoresByScope(ZoneScope scope) { - List stores = dataStoreDao.findByScope(scope); + List stores = dataStoreDao.findByZone(scope, null); List imageStores = new ArrayList(); for (ImageStoreVO store : stores) { imageStores.add(getImageStore(store.getId())); @@ -127,7 +127,7 @@ public List listImageStoresByScope(ZoneScope scope) { public List listImageStoresByScopeExcludingReadOnly(ZoneScope scope) { String allocationAlgorithm = configDao.getValue(Config.ImageStoreAllocationAlgorithm.key()); - List stores = dataStoreDao.findByScopeExcludingReadOnly(scope); + List stores = dataStoreDao.findByZone(scope, Boolean.FALSE); List imageStores = new ArrayList(); for (ImageStoreVO store : stores) { imageStores.add(getImageStore(store.getId())); diff --git a/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java 
b/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java index 92c128b27fe1..7a1fb0c5af8a 100644 --- a/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java +++ b/plugins/integrations/prometheus/src/main/java/org/apache/cloudstack/metrics/PrometheusExporterImpl.java @@ -220,7 +220,7 @@ private void addStorageMetrics(final List metricsList, final long dcId, fi metricsList.add(new ItemPool(zoneName, zoneUuid, poolName, poolPath, "primary", poolFactor, TOTAL, totalCapacity)); } - for (final ImageStore imageStore : imageStoreDao.findByScope(new ZoneScope(dcId))) { + for (final ImageStore imageStore : imageStoreDao.findByZone(new ZoneScope(dcId), null)) { final StorageStats stats = ApiDBUtils.getSecondaryStorageStatistics(imageStore.getId()); metricsList.add(new ItemPool(zoneName, zoneUuid, imageStore.getName(), imageStore.getUrl(), "secondary", null, USED, stats != null ? stats.getByteUsed() : 0)); metricsList.add(new ItemPool(zoneName, zoneUuid, imageStore.getName(), imageStore.getUrl(), "secondary", null, TOTAL, stats != null ? 
stats.getCapacityBytes() : 0)); diff --git a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java index 7e9c9d39c2b1..f95cc6556dd9 100755 --- a/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java +++ b/server/src/main/java/com/cloud/configuration/ConfigurationManagerImpl.java @@ -1708,7 +1708,7 @@ protected void checkIfZoneIsDeletable(final long zoneId) { } //check if there are any secondary stores attached to the zone - if(!_imageStoreDao.findByScope(new ZoneScope(zoneId)).isEmpty()) { + if(!_imageStoreDao.findByZone(new ZoneScope(zoneId), null).isEmpty()) { throw new CloudRuntimeException(errorMsg + "there are Secondary storages in this zone"); } diff --git a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java index 9291321e9576..bcddd67c0eed 100644 --- a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java @@ -29,6 +29,8 @@ import org.apache.cloudstack.engine.orchestration.service.StorageOrchestrationService; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.jobs.AsyncJobManager; +import org.apache.cloudstack.storage.ImageStoreService; +import org.apache.cloudstack.storage.ImageStoreService.MigrationPolicy; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.commons.lang3.EnumUtils; @@ -69,13 +71,17 @@ public MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd) { Long srcImgStoreId = cmd.getId(); ImageStoreVO srcImageVO = imageStoreDao.findById(srcImgStoreId); List destImgStoreIds = cmd.getMigrateTo(); - String migrationType = cmd.getMigrationType().toUpperCase(); + String migrationType = cmd.getMigrationType(); - if 
(!EnumUtils.isValidEnum(MigrationPolicy.class, migrationType)) { - throw new CloudRuntimeException("Not a valid migration policy"); - } + // default policy is complete + MigrationPolicy policy = MigrationPolicy.COMPLETE; - MigrationPolicy policy = MigrationPolicy.valueOf(migrationType); + if (migrationType != null) { + if (!EnumUtils.isValidEnum(MigrationPolicy.class, migrationType.toUpperCase())) { + throw new CloudRuntimeException("Not a valid migration policy"); + } + policy = MigrationPolicy.valueOf(migrationType.toUpperCase()); + } String message = null; diff --git a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java index e6953296db03..8c11f7730eb1 100644 --- a/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java +++ b/server/src/test/java/com/cloud/configuration/ConfigurationManagerTest.java @@ -694,7 +694,7 @@ public void checkIfZoneIsDeletableSuccessTest() { Mockito.when(_vmInstanceDao.listByZoneId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_volumeDao.findByDc(anyLong())).thenReturn(new ArrayList()); Mockito.when(_physicalNetworkDao.listByZone(anyLong())).thenReturn(new ArrayList()); - Mockito.when(_imageStoreDao.findByScope(any(ZoneScope.class))).thenReturn(new ArrayList()); + Mockito.when(_imageStoreDao.findByZone(any(ZoneScope.class), anyBoolean())).thenReturn(new ArrayList()); configurationMgr.checkIfZoneIsDeletable(new Random().nextLong()); } @@ -712,7 +712,7 @@ public void checkIfZoneIsDeletableFailureOnHostTest() { Mockito.when(_vmInstanceDao.listByZoneId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_volumeDao.findByDc(anyLong())).thenReturn(new ArrayList()); Mockito.when(_physicalNetworkDao.listByZone(anyLong())).thenReturn(new ArrayList()); - Mockito.when(_imageStoreDao.findByScope(any(ZoneScope.class))).thenReturn(new ArrayList()); + Mockito.when(_imageStoreDao.findByZone(any(ZoneScope.class), 
anyBoolean())).thenReturn(new ArrayList()); configurationMgr.checkIfZoneIsDeletable(new Random().nextLong()); } @@ -730,7 +730,7 @@ public void checkIfZoneIsDeletableFailureOnPodTest() { Mockito.when(_vmInstanceDao.listByZoneId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_volumeDao.findByDc(anyLong())).thenReturn(new ArrayList()); Mockito.when(_physicalNetworkDao.listByZone(anyLong())).thenReturn(new ArrayList()); - Mockito.when(_imageStoreDao.findByScope(any(ZoneScope.class))).thenReturn(new ArrayList()); + Mockito.when(_imageStoreDao.findByZone(any(ZoneScope.class), anyBoolean())).thenReturn(new ArrayList()); configurationMgr.checkIfZoneIsDeletable(new Random().nextLong()); } @@ -744,7 +744,7 @@ public void checkIfZoneIsDeletableFailureOnPrivateIpAddressTest() { Mockito.when(_vmInstanceDao.listByZoneId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_volumeDao.findByDc(anyLong())).thenReturn(new ArrayList()); Mockito.when(_physicalNetworkDao.listByZone(anyLong())).thenReturn(new ArrayList()); - Mockito.when(_imageStoreDao.findByScope(any(ZoneScope.class))).thenReturn(new ArrayList()); + Mockito.when(_imageStoreDao.findByZone(any(ZoneScope.class), anyBoolean())).thenReturn(new ArrayList()); configurationMgr.checkIfZoneIsDeletable(new Random().nextLong()); } @@ -758,7 +758,7 @@ public void checkIfZoneIsDeletableFailureOnPublicIpAddressTest() { Mockito.when(_vmInstanceDao.listByZoneId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_volumeDao.findByDc(anyLong())).thenReturn(new ArrayList()); Mockito.when(_physicalNetworkDao.listByZone(anyLong())).thenReturn(new ArrayList()); - Mockito.when(_imageStoreDao.findByScope(any(ZoneScope.class))).thenReturn(new ArrayList()); + Mockito.when(_imageStoreDao.findByZone(any(ZoneScope.class), anyBoolean())).thenReturn(new ArrayList()); configurationMgr.checkIfZoneIsDeletable(new Random().nextLong()); } @@ -776,7 +776,7 @@ public void checkIfZoneIsDeletableFailureOnVmInstanceTest() { 
Mockito.when(_vmInstanceDao.listByZoneId(anyLong())).thenReturn(arrayList); Mockito.when(_volumeDao.findByDc(anyLong())).thenReturn(new ArrayList()); Mockito.when(_physicalNetworkDao.listByZone(anyLong())).thenReturn(new ArrayList()); - Mockito.when(_imageStoreDao.findByScope(any(ZoneScope.class))).thenReturn(new ArrayList()); + Mockito.when(_imageStoreDao.findByZone(any(ZoneScope.class), anyBoolean())).thenReturn(new ArrayList()); configurationMgr.checkIfZoneIsDeletable(new Random().nextLong()); } @@ -794,7 +794,7 @@ public void checkIfZoneIsDeletableFailureOnVolumeTest() { Mockito.when(_vmInstanceDao.listByZoneId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_volumeDao.findByDc(anyLong())).thenReturn(arrayList); Mockito.when(_physicalNetworkDao.listByZone(anyLong())).thenReturn(new ArrayList()); - Mockito.when(_imageStoreDao.findByScope(any(ZoneScope.class))).thenReturn(new ArrayList()); + Mockito.when(_imageStoreDao.findByZone(any(ZoneScope.class), anyBoolean())).thenReturn(new ArrayList()); configurationMgr.checkIfZoneIsDeletable(new Random().nextLong()); } @@ -812,7 +812,7 @@ public void checkIfZoneIsDeletableFailureOnPhysicalNetworkTest() { Mockito.when(_vmInstanceDao.listByZoneId(anyLong())).thenReturn(new ArrayList()); Mockito.when(_volumeDao.findByDc(anyLong())).thenReturn(new ArrayList()); Mockito.when(_physicalNetworkDao.listByZone(anyLong())).thenReturn(arrayList); - Mockito.when(_imageStoreDao.findByScope(any(ZoneScope.class))).thenReturn(new ArrayList()); + Mockito.when(_imageStoreDao.findByZone(any(ZoneScope.class), anyBoolean())).thenReturn(new ArrayList()); configurationMgr.checkIfZoneIsDeletable(new Random().nextLong()); } diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java index 04e300cd61eb..bb17d1805769 100644 --- 
a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java @@ -59,7 +59,7 @@ public class PremiumSecondaryStorageManagerImpl extends SecondaryStorageManagerI private int _standbyCapacity = SecondaryStorageVmManager.DEFAULT_STANDBY_CAPACITY; private int _maxExecutionTimeMs = 1800000; long currentTime = DateUtil.currentGMTTime().getTime(); - long nextSpawnTime = currentTime + _maxExecutionTimeMs; + long nextSpawnTime = currentTime + _maxExecutionTimeMs/2; private List migrationSSVMS = new ArrayList<>(); @Inject @@ -83,7 +83,7 @@ public boolean configure(String name, Map params) throws Configu int nMaxExecutionMinutes = NumbersUtil.parseInt(_configDao.getValue(Config.SecStorageCmdExecutionTimeMax.key()), 30); _maxExecutionTimeMs = nMaxExecutionMinutes * 60 * 1000; - nextSpawnTime = currentTime + _maxExecutionTimeMs; + nextSpawnTime = currentTime + _maxExecutionTimeMs/2; migrateCapPerSSVM = NumbersUtil.parseInt(_configDao.getValue(Config.SecStorageMaxMigrateSessions.key()), DEFAULT_MIGRATE_SS_VM_CAPACITY); @@ -173,7 +173,7 @@ else if (!copyCmdsInPipeline.isEmpty() && copyCmdsInPipeline.size() >= halfLimi (((currentTime - copyCmdsInPipeline.get(halfLimit - 1).getCreated().getTime()) > _maxExecutionTimeMs/2 )) && (currentTime > nextSpawnTime) && alreadyRunning.size() <= maxSsvms) { nextSpawnTime = currentTime + _maxExecutionTimeMs/2; - s_logger.debug("scaling SSVM"); + s_logger.debug("scaling SSVM to handle migration tasks"); return new Pair(AfterScanAction.expand, SecondaryStorageVm.Role.templateProcessor); } From 452fe520fbc5ab61c965237febd02e6f75a895bd Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 11 May 2020 13:59:44 +0530 Subject: [PATCH 10/40] Modularized storageOrchestrator class --- .../orchestration/DataMigrationUtility.java | 262 
++++++++++++++++++ .../orchestration/StorageOrchestrator.java | 235 +--------------- ...ring-engine-orchestration-core-context.xml | 2 + .../api/query/dao/TemplateJoinDaoImpl.java | 2 + 4 files changed, 280 insertions(+), 221 deletions(-) create mode 100644 engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java new file mode 100644 index 000000000000..fe5d15606e76 --- /dev/null +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java @@ -0,0 +1,262 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package org.apache.cloudstack.engine.orchestration; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; + +import javax.inject.Inject; + +import org.apache.cloudstack.engine.subsystem.api.storage.DataObject; +import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; +import org.apache.cloudstack.storage.ImageStoreService; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; +import org.apache.log4j.Logger; + +import com.cloud.host.HostVO; +import com.cloud.host.Status; +import com.cloud.host.dao.HostDao; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.SnapshotVO; +import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.dao.SnapshotDao; +import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.utils.Pair; +import com.cloud.utils.exception.CloudRuntimeException; +import com.cloud.vm.SecondaryStorageVm; +import com.cloud.vm.SecondaryStorageVmVO; +import com.cloud.vm.VirtualMachine; +import com.cloud.vm.dao.SecondaryStorageVmDao; + +public class 
DataMigrationUtility { + @Inject + SecondaryStorageVmDao secStorageVmDao; + @Inject + TemplateDataStoreDao templateDataStoreDao; + @Inject + SnapshotDataStoreDao snapshotDataStoreDao; + @Inject + VolumeDataStoreDao volumeDataStoreDao; + @Inject + VMTemplateDao templateDao; + @Inject + VolumeDataFactory volumeFactory; + @Inject + TemplateDataFactory templateFactory; + @Inject + SnapshotDataFactory snapshotFactory; + @Inject + HostDao hostDao; + @Inject + SnapshotDao snapshotDao; + + private static final Logger s_logger = Logger.getLogger(DataMigrationUtility.class); + + /** This function verifies if the given image store comprises of data objects that are not in either the "Ready" or + * "Allocated" state - in such a case, if the migration policy is complete, the migration is terminated + */ + private boolean filesReady(Long srcDataStoreId) { + String[] validStates = new String[]{"Ready", "Allocated"}; + boolean isReady = true; + List templates = templateDataStoreDao.listByStoreId(srcDataStoreId); + for (TemplateDataStoreVO template : templates) { + isReady &= (Arrays.asList(validStates).contains(template.getState().toString())); + } + List snapshots = snapshotDataStoreDao.listByStoreId(srcDataStoreId, DataStoreRole.Image); + for (SnapshotDataStoreVO snapshot : snapshots) { + isReady &= (Arrays.asList(validStates).contains(snapshot.getState().toString())); + } + List volumes = volumeDataStoreDao.listByStoreId(srcDataStoreId); + for (VolumeDataStoreVO volume : volumes) { + isReady &= (Arrays.asList(validStates).contains(volume.getState().toString())); + } + return isReady; + } + + protected void checkIfCompleteMigrationPossible(ImageStoreService.MigrationPolicy policy, Long srcDataStoreId) { + if (policy == ImageStoreService.MigrationPolicy.COMPLETE) { + if (!filesReady(srcDataStoreId)) { + throw new CloudRuntimeException("Complete migration failed as there are data objects which are not Ready"); + } + } + return; + } + + protected Long getFileSize(DataObject file, 
Map, Long>> snapshotChain) { + Long size = file.getSize(); + Pair, Long> chain = snapshotChain.get(file); + if (file instanceof SnapshotInfo && chain.first() != null) { + size = chain.second(); + } + return size; + } + + /** + * Sorts the datastores in decreasing order of their free capacities, so as to make + * an informed decision of picking the datastore with maximum free capactiy for migration + */ + protected List sortDataStores(Map> storageCapacities) { + List>> list = + new LinkedList>>((storageCapacities.entrySet())); + + Collections.sort(list, new Comparator>>() { + @Override + public int compare(Map.Entry> e1, Map.Entry> e2) { + return e2.getValue().first() > e1.getValue().first() ? 1 : -1; + } + }); + HashMap> temp = new LinkedHashMap<>(); + for (Map.Entry> value : list) { + temp.put(value.getKey(), value.getValue()); + } + + return new ArrayList<>(temp.keySet()); + } + + protected List getSortedValidSourcesList(DataStore srcDataStore, Map, Long>> snapshotChains) { + List files = new ArrayList<>(); + files.addAll(getAllValidTemplates(srcDataStore)); + files.addAll(getAllValidSnapshotsAndChains(srcDataStore, snapshotChains)); + files.addAll(getAllValidVolumes(srcDataStore)); + + files = sortFilesOnSize(files, snapshotChains); + + return files; + } + + protected List sortFilesOnSize(List files, Map, Long>> snapshotChains) { + Collections.sort(files, new Comparator() { + @Override + public int compare(DataObject o1, DataObject o2) { + Long size1 = o1.getSize(); + Long size2 = o2.getSize(); + if (o1 instanceof SnapshotInfo) { + size1 = snapshotChains.get(o1).second(); + } + if (o2 instanceof SnapshotInfo) { + size2 = snapshotChains.get(o2).second(); + } + return (int) (size2 - size1); + } + }); + return files; + } + + // Gets list of all valid templates, i.e, templates in "Ready" state for migration + protected List getAllValidTemplates(DataStore srcDataStore) { + + List files = new LinkedList<>(); + List templates = 
templateDataStoreDao.listByStoreId(srcDataStore.getId()); + for (TemplateDataStoreVO template : templates) { + VMTemplateVO templateVO = templateDao.findById(template.getTemplateId()); + if (template.getState() == ObjectInDataStoreStateMachine.State.Ready && !templateVO.isPublicTemplate()) { + files.add(templateFactory.getTemplate(template.getTemplateId(), srcDataStore)); + } + } + return files; + } + + /** Returns parent snapshots and snapshots that do not have any children; snapshotChains comprises of the snapshot chain info + * for each parent snapshot and the cumulative size of the chain - this is done to ensure that all the snapshots in a chain + * are migrated to the same datastore + */ + protected List getAllValidSnapshotsAndChains(DataStore srcDataStore, Map, Long>> snapshotChains) { + List files = new LinkedList<>(); + List snapshots = snapshotDataStoreDao.listByStoreId(srcDataStore.getId(), DataStoreRole.Image); + for (SnapshotDataStoreVO snapshot : snapshots) { + SnapshotVO snapshotVO = snapshotDao.findById(snapshot.getSnapshotId()); + if (snapshot.getState() == ObjectInDataStoreStateMachine.State.Ready && snapshot.getParentSnapshotId() == 0 ) { + SnapshotInfo snap = snapshotFactory.getSnapshot(snapshotVO.getSnapshotId(), DataStoreRole.Image); + files.add(snap); + } + } + + for (SnapshotInfo parent : files) { + List chain = new ArrayList<>(); + chain.add(parent); + for (int i =0; i< chain.size(); i++) { + SnapshotInfo child = chain.get(i); + List children = child.getChildren(); + if (children != null) { + chain.addAll(children); + } + } + snapshotChains.put(parent, new Pair, Long>(chain, getSizeForChain(chain))); + } + + return (List) (List) files; + } + + // Finds the cumulative file size for all data objects in the chain + protected Long getSizeForChain(List chain) { + Long size = 0L; + for (SnapshotInfo snapshot : chain) { + size += snapshot.getSize(); + } + return size; + } + + // Returns a list of volumes that are in "Ready" state + protected List 
getAllValidVolumes(DataStore srcDataStore) { + List files = new LinkedList<>(); + List volumes = volumeDataStoreDao.listByStoreId(srcDataStore.getId()); + for (VolumeDataStoreVO volume : volumes) { + if (volume.getState() == ObjectInDataStoreStateMachine.State.Ready) { + files.add(volumeFactory.getVolume(volume.getVolumeId(), srcDataStore)); + } + } + return files; + } + + /** Returns the count of active SSVMs - SSVM with agents in connected state, so as to dynamically increase the thread pool + * size when SSVMs scale + */ + protected int activeSSVMCount(DataStore dataStore) { + long datacenterId = dataStore.getScope().getScopeId(); + List ssvms = + secStorageVmDao.getSecStorageVmListInStates(SecondaryStorageVm.Role.templateProcessor, datacenterId, VirtualMachine.State.Running, VirtualMachine.State.Migrating); + int activeSSVMs = 0; + for (SecondaryStorageVmVO vm : ssvms) { + String name = "s-"+vm.getId()+"-VM"; + HostVO ssHost = hostDao.findByName(name); + if (ssHost != null) { + if (ssHost.getState() == Status.Up) { + activeSSVMs++; + } + } + } + return activeSSVMs; + } +} diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index 2ffe58f2a501..6e5db3ce36c5 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -18,13 +18,9 @@ package org.apache.cloudstack.engine.orchestration; import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Comparator; import java.util.Date; import java.util.HashMap; import java.util.Hashtable; -import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -43,99 +39,70 @@ import 
org.apache.cloudstack.engine.subsystem.api.storage.DataObject; import org.apache.cloudstack.engine.subsystem.api.storage.DataStore; import org.apache.cloudstack.engine.subsystem.api.storage.DataStoreManager; -import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageService; import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageService.DataObjectResult; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotDataFactory; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; -import org.apache.cloudstack.engine.subsystem.api.storage.TemplateDataFactory; -import org.apache.cloudstack.engine.subsystem.api.storage.VolumeDataFactory; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; -import org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.storage.ImageStoreService.MigrationPolicy; -import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; -import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; import org.apache.commons.math3.stat.descriptive.moment.Mean; import org.apache.commons.math3.stat.descriptive.moment.StandardDeviation; import org.apache.log4j.Logger; import com.cloud.configuration.Config; -import com.cloud.host.HostVO; -import com.cloud.host.Status; -import com.cloud.host.dao.HostDao; import 
com.cloud.server.StatsCollector; import com.cloud.storage.DataStoreRole; import com.cloud.storage.SnapshotVO; import com.cloud.storage.StorageService; import com.cloud.storage.StorageStats; -import com.cloud.storage.VMTemplateVO; import com.cloud.storage.dao.SnapshotDao; -import com.cloud.storage.dao.VMTemplateDao; import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.StringUtils; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.exception.CloudRuntimeException; -import com.cloud.vm.SecondaryStorageVm; -import com.cloud.vm.SecondaryStorageVmVO; -import com.cloud.vm.VirtualMachine; -import com.cloud.vm.dao.SecondaryStorageVmDao; public class StorageOrchestrator extends ManagerBase implements StorageOrchestrationService, Configurable { private static final Logger s_logger = Logger.getLogger(StorageOrchestrator.class); @Inject - TemplateDataStoreDao templateDataStoreDao; - @Inject SnapshotDataStoreDao snapshotDataStoreDao; @Inject - VolumeDataStoreDao volumeDataStoreDao; - @Inject - VolumeDataFactory volumeFactory; - @Inject - VMTemplateDao templateDao; - @Inject - TemplateDataFactory templateFactory; - @Inject SnapshotDao snapshotDao; @Inject SnapshotDataFactory snapshotFactory; @Inject DataStoreManager dataStoreManager; @Inject - ImageStoreDao imageStoreDao; - @Inject StatsCollector statsCollector; @Inject public StorageService storageService; @Inject - SecondaryStorageVmDao secStorageVmDao; - @Inject ConfigurationDao configDao; @Inject - HostDao hostDao; + private SecondaryStorageService secStgSrv; @Inject - private AsyncJobManager jobMgr; + TemplateDataStoreDao templateDataStoreDao; @Inject - private SecondaryStorageService secStgSrv; + VolumeDataStoreDao volumeDataStoreDao; + @Inject + DataMigrationUtility migrationHelper; ConfigKey ImageStoreImbalanceThreshold = new ConfigKey<>("Advanced", Double.class, "image.store.imbalance.threshold", - "0.1", + "0.5", "The storage imbalance threshold that is compared with 
the standard deviation percentage for a storage utilization metric. " + "The value is a percentage in decimal format.", true, ConfigKey.Scope.Global); Integer numConcurrentCopyTasksPerSSVM = 2; - private double imageStoreCapacityThreshold = 0.90; @Override @@ -176,11 +143,11 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto boolean success = true; String message = null; - checkIfCompleteMigrationPossible(migrationPolicy, srcDataStoreId); + migrationHelper.checkIfCompleteMigrationPossible(migrationPolicy, srcDataStoreId); DataStore srcDatastore = dataStoreManager.getDataStore(srcDataStoreId, DataStoreRole.Image); Map, Long>> snapshotChains = new HashMap<>(); - files = getSortedValidSourcesList(srcDatastore, snapshotChains); + files = migrationHelper.getSortedValidSourcesList(srcDatastore, snapshotChains); if (files.isEmpty()) { return new MigrationResponse("No files in Image store "+srcDatastore.getId()+ " to migrate", migrationPolicy.toString(), true); @@ -192,7 +159,6 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto for (Long storeId : destDatastores) { storageCapacities.put(storeId, new Pair<>(null, null)); } - storageCapacities.put(srcDataStoreId, new Pair<>(null, null)); // If the migration policy is to completely migrate data from the given source Image Store, then set it's state @@ -216,7 +182,6 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto } List>> futures = new ArrayList<>(); - while (true) { DataObject chosenFileForMigration = null; if (files.size() > 0) { @@ -225,8 +190,9 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto // Choose datastore with maximum free capacity as the destination datastore for migration storageCapacities = getStorageCapacities(storageCapacities); - List orderedDS = sortDataStores(storageCapacities); + List orderedDS = migrationHelper.sortDataStores(storageCapacities); Long destDatastoreId = orderedDS.get(0); + // If 
there aren't anymore files available for migration or no valid Image stores available for migration // end the migration process if (chosenFileForMigration == null || destDatastoreId == null || destDatastoreId == srcDatastore.getId()) { @@ -254,12 +220,10 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto // If there is a benefit in migration of the chosen file to the destination store, then proceed with migration if (shouldMigrate(chosenFileForMigration, srcDatastore.getId(), destDatastoreId, storageCapacities, snapshotChains, migrationPolicy)) { - Long fileSize = getFileSize(chosenFileForMigration, snapshotChains); + Long fileSize = migrationHelper.getFileSize(chosenFileForMigration, snapshotChains); storageCapacities = assumeMigrate(storageCapacities, srcDatastore.getId(), destDatastoreId, fileSize); - - long activeSsvms = activeSSVMCount(srcDatastore); + long activeSsvms = migrationHelper.activeSSVMCount(srcDatastore); long totalJobs = activeSsvms * numConcurrentCopyTasksPerSSVM; - // Increase thread pool size with increase in number of SSVMs if ( totalJobs > executor.getCorePoolSize()) { executor.setMaximumPoolSize((int) (totalJobs)); @@ -318,7 +282,7 @@ private void handleSnapshotMigration(Long srcDataStoreId, Date start, Date end, SnapshotInfo parentSnapshot = snapshotInfo.getParent(); if (parentSnapshot == null && policy == MigrationPolicy.COMPLETE) { - List dstores = sortDataStores(storageCapacities); + List dstores = migrationHelper.sortDataStores(storageCapacities); Long storeId = dstores.get(0); if (storeId.equals(srcDataStoreId)) { storeId = dstores.get(1); @@ -371,28 +335,6 @@ private double getStandardDeviation(Map> storageCapacitie return (calculateStorageStandardDeviation(freeCapacities, mean) / mean); } - /** - * Sorts the datastores in decreasing order of their free capacities, so as to make - * an informed decision of picking the datastore with maximum free capactiy for migration - */ - private List sortDataStores(Map> 
storageCapacities) { - List>> list = - new LinkedList>>((storageCapacities.entrySet())); - - Collections.sort(list, new Comparator>>() { - @Override - public int compare(Map.Entry> e1, Map.Entry> e2) { - return e2.getValue().first() > e1.getValue().first() ? 1 : -1; - } - }); - HashMap> temp = new LinkedHashMap<>(); - for (Map.Entry> value : list) { - temp.put(value.getKey(), value.getValue()); - } - - return new ArrayList<>(temp.keySet()); - } - /** * * @param storageCapacities Map comprising the metrics(free and total capacities) of the images stores considered @@ -411,15 +353,6 @@ private Map> assumeMigrate(Map> st return modifiedCapacities; } - private Long getFileSize(DataObject file, Map, Long>> snapshotChain) { - Long size = file.getSize(); - Pair, Long> chain = snapshotChain.get(file); - if (file instanceof SnapshotInfo && chain.first() != null) { - size = chain.second(); - } - return size; - } - /** * This function determines if migration should in fact take place or not : * - For Balanced migration - the mean standard deviation is calculated before and after (supposed) migration @@ -440,7 +373,7 @@ private boolean shouldMigrate(DataObject chosenFile, Long srcDatastoreId, Long d if (migrationPolicy == MigrationPolicy.BALANCE) { double meanStdDevCurrent = getStandardDeviation(storageCapacities); - Long fileSize = getFileSize(chosenFile, snapshotChains); + Long fileSize = migrationHelper.getFileSize(chosenFile, snapshotChains); Map> proposedCapacities = assumeMigrate(storageCapacities, srcDatastoreId, destDatastoreId, fileSize); double meanStdDevAfter = getStandardDeviation(proposedCapacities); @@ -481,146 +414,6 @@ private double calculateStorageStandardDeviation(double[] metricValues, double m return standardDeviation.evaluate(metricValues, mean); } - /** This function verifies if the given image store comprises of data objects that are not in either the "Ready" or - * "Allocated" state - in such a case, if the migration policy is complete, the migration 
is terminated - */ - private boolean filesReady(Long srcDataStoreId) { - String[] validStates = new String[]{"Ready", "Allocated"}; - boolean isReady = true; - List templates = templateDataStoreDao.listByStoreId(srcDataStoreId); - for (TemplateDataStoreVO template : templates) { - isReady &= (Arrays.asList(validStates).contains(template.getState().toString())); - } - List snapshots = snapshotDataStoreDao.listByStoreId(srcDataStoreId, DataStoreRole.Image); - for (SnapshotDataStoreVO snapshot : snapshots) { - isReady &= (Arrays.asList(validStates).contains(snapshot.getState().toString())); - } - List volumes = volumeDataStoreDao.listByStoreId(srcDataStoreId); - for (VolumeDataStoreVO volume : volumes) { - isReady &= (Arrays.asList(validStates).contains(volume.getState().toString())); - } - return isReady; - } - - private void checkIfCompleteMigrationPossible(MigrationPolicy policy, Long srcDataStoreId) { - if (policy == MigrationPolicy.COMPLETE) { - if (!filesReady(srcDataStoreId)) { - throw new CloudRuntimeException("Complete migration failed as there are data objects which are not Ready"); - } - } - return; - } - - private List getSortedValidSourcesList(DataStore srcDataStore, Map, Long>> snapshotChains) { - List files = new ArrayList<>(); - files.addAll(getAllValidTemplates(srcDataStore)); - files.addAll(getAllValidSnapshotsAndChains(srcDataStore, snapshotChains)); - files.addAll(getAllValidVolumes(srcDataStore)); - - Collections.sort(files, new Comparator() { - @Override - public int compare(DataObject o1, DataObject o2) { - Long size1 = o1.getSize(); - Long size2 = o2.getSize(); - if (o1 instanceof SnapshotInfo) { - size1 = snapshotChains.get(o1).second(); - } - if (o2 instanceof SnapshotInfo) { - size2 = snapshotChains.get(o2).second(); - } - return size2 > size1 ? 
1 : -1; - } - }); - - return files; - } - - // Gets list of all valid templates, i.e, templates in "Ready" state for migration - private List getAllValidTemplates(DataStore srcDataStore) { - - List files = new LinkedList<>(); - List templates = templateDataStoreDao.listByStoreId(srcDataStore.getId()); - for (TemplateDataStoreVO template : templates) { - VMTemplateVO templateVO = templateDao.findById(template.getTemplateId()); - if (template.getState() == ObjectInDataStoreStateMachine.State.Ready && !templateVO.isPublicTemplate()) { - files.add(templateFactory.getTemplate(template.getTemplateId(), srcDataStore)); - } - } - return files; - } - - /** Returns parent snapshots and snapshots that do not have any children; snapshotChains comprises of the snapshot chain info - * for each parent snapshot and the cumulative size of the chain - this is done to ensure that all the snapshots in a chain - * are migrated to the same datastore - */ - private List getAllValidSnapshotsAndChains(DataStore srcDataStore, Map, Long>> snapshotChains) { - List files = new LinkedList<>(); - List snapshots = snapshotDataStoreDao.listByStoreId(srcDataStore.getId(), DataStoreRole.Image); - for (SnapshotDataStoreVO snapshot : snapshots) { - SnapshotVO snapshotVO = snapshotDao.findById(snapshot.getSnapshotId()); - if (snapshot.getState() == ObjectInDataStoreStateMachine.State.Ready && snapshot.getParentSnapshotId() == 0 ) { - SnapshotInfo snap = snapshotFactory.getSnapshot(snapshotVO.getSnapshotId(), DataStoreRole.Image); - files.add(snap); - } - } - - for (SnapshotInfo parent : files) { - List chain = new ArrayList<>(); - chain.add(parent); - for (int i =0; i< chain.size(); i++) { - SnapshotInfo child = chain.get(i); - List children = child.getChildren(); - if (children != null) { - chain.addAll(children); - } - } - snapshotChains.put(parent, new Pair, Long>(chain, getSizeForChain(chain))); - } - - return (List) (List) files; - } - - // Finds the cumulative file size for all data objects in 
the chain - private Long getSizeForChain(List chain) { - Long size = 0L; - for (SnapshotInfo snapshot : chain) { - size += snapshot.getSize(); - } - return size; - } - - // Returns a list of volumes that are in "Ready" state - private List getAllValidVolumes(DataStore srcDataStore) { - List files = new LinkedList<>(); - List volumes = volumeDataStoreDao.listByStoreId(srcDataStore.getId()); - for (VolumeDataStoreVO volume : volumes) { - if (volume.getState() == ObjectInDataStoreStateMachine.State.Ready) { - files.add(volumeFactory.getVolume(volume.getVolumeId(), srcDataStore)); - } - } - return files; - } - - /** Returns the count of active SSVMs - SSVM with agents in connected state, so as to dynamically increase the thread pool - * size when SSVMs scale - */ - private int activeSSVMCount(DataStore dataStore) { - long datacenterId = dataStore.getScope().getScopeId(); - List ssvms = - secStorageVmDao.getSecStorageVmListInStates(SecondaryStorageVm.Role.templateProcessor, datacenterId, VirtualMachine.State.Running, VirtualMachine.State.Migrating); - int activeSSVMs = 0; - for (SecondaryStorageVmVO vm : ssvms) { - String name = "s-"+vm.getId()+"-VM"; - HostVO ssHost = hostDao.findByName(name); - if (ssHost != null) { - if (ssHost.getState() == Status.Up) { - activeSSVMs++; - } - } - } - return activeSSVMs; - } - private class MigrateDataTask implements Callable> { private DataObject file; private DataStore srcDataStore; diff --git a/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml b/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml index da3e377b5a28..66335a6b0579 100644 --- a/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml +++ b/engine/orchestration/src/main/resources/META-INF/cloudstack/core/spring-engine-orchestration-core-context.xml @@ -46,6 +46,8 @@ + diff --git 
a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java index 924d1edc387b..32467a8ec80c 100644 --- a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java @@ -104,6 +104,7 @@ protected TemplateJoinDaoImpl() { activeTmpltSearch.and("store_id", activeTmpltSearch.entity().getDataStoreId(), SearchCriteria.Op.EQ); activeTmpltSearch.and("type", activeTmpltSearch.entity().getTemplateType(), SearchCriteria.Op.EQ); activeTmpltSearch.and("templateState", activeTmpltSearch.entity().getTemplateState(), SearchCriteria.Op.EQ); + activeTmpltSearch.and("public", activeTmpltSearch.entity().isPublicTemplate(), SearchCriteria.Op.EQ); activeTmpltSearch.done(); // select distinct pair (template_id, zone_id) @@ -488,6 +489,7 @@ public List listActiveTemplates(long storeId) { sc.setParameters("store_id", storeId); sc.setParameters("type", TemplateType.USER); sc.setParameters("templateState", VirtualMachineTemplate.State.Active); + sc.setParameters("public", Boolean.FALSE); return searchIncludingRemoved(sc, null, null, false); } From 82f65569a7ec66a46f84d231948111a35bd31548 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 19 May 2020 12:38:13 +0530 Subject: [PATCH 11/40] Address review comments --- .../admin/storage/ListImageStoresCmd.java | 2 +- .../MigrateSecondaryStorageDataCmd.java | 4 +- .../admin/storage/UpdateImageStoreCmd.java | 2 +- .../com/cloud/storage/StorageManager.java | 3 + .../orchestration/DataMigrationUtility.java | 27 +++--- .../orchestration/StorageOrchestrator.java | 97 ++++++++++--------- ....java => SecondaryStorageServiceImpl.java} | 11 ++- .../storage/image/TemplateServiceImpl.java | 1 - .../ImageStoreProviderManagerImpl.java | 20 +++- ...ring-engine-storage-image-core-context.xml | 4 +- .../storage/volume/VolumeObject.java | 1 - pom.xml | 1 + server/pom.xml | 2 +- 
.../java/com/cloud/configuration/Config.java | 13 +-- .../cloud/storage/ImageStoreDetailsUtil.java | 1 + .../com/cloud/storage/StorageManagerImpl.java | 3 +- .../PremiumSecondaryStorageManagerImpl.java | 77 ++++++++------- .../resource/NfsSecondaryStorageResource.java | 4 +- 18 files changed, 143 insertions(+), 130 deletions(-) rename engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/{StorageServiceImpl.java => SecondaryStorageServiceImpl.java} (93%) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListImageStoresCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListImageStoresCmd.java index 6d11983c1420..4f7cf81f20db 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListImageStoresCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/ListImageStoresCmd.java @@ -51,7 +51,7 @@ public class ListImageStoresCmd extends BaseListCmd { @Parameter(name = ApiConstants.ID, type = CommandType.UUID, entityType = ImageStoreResponse.class, description = "the ID of the storage pool") private Long id; - @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, entityType = ImageStoreResponse.class, description = "read-only status of the image store") + @Parameter(name = ApiConstants.READ_ONLY, type = CommandType.BOOLEAN, entityType = ImageStoreResponse.class, description = "read-only status of the image store", since = "4.15.0") private Boolean readonly; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java index 6e6930c44fd4..233f1e8b1684 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java @@ -37,11 +37,11 @@ responseObject = MigrationResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, - since = "4.14.0", + since = "4.15.0", authorized = {RoleType.Admin}) public class MigrateSecondaryStorageDataCmd extends BaseAsyncCmd { - public static final Logger s_logger = Logger.getLogger(MigrateSecondaryStorageDataCmd.class.getName()); + public static final Logger LOGGER = Logger.getLogger(MigrateSecondaryStorageDataCmd.class.getName()); public static final String APINAME = "migrateSecondaryStorageData"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java index f7ff0d245b40..d7dca93b485a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/UpdateImageStoreCmd.java @@ -30,7 +30,7 @@ import com.cloud.storage.ImageStore; @APICommand(name = UpdateImageStoreCmd.APINAME, description = "Updates image store read-only status", responseObject = ImageStoreResponse.class, entityType = {ImageStore.class}, - requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) + requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.15.0") public class UpdateImageStoreCmd extends BaseCmd { private static final Logger LOG = Logger.getLogger(UpdateImageStoreCmd.class.getName()); public static final String APINAME = "updateImageStore"; diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 62a241be4345..e0fb1cfbc146 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ 
b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -112,6 +112,9 @@ public interface StorageManager extends StorageService { ConfigKey PRIMARY_STORAGE_DOWNLOAD_WAIT = new ConfigKey("Storage", Integer.class, "primary.storage.download.wait", "10800", "In second, timeout for download template to primary storage", false); + ConfigKey SecStorageMaxMigrateSessions = new ConfigKey("Advanced", Integer.class, "secstorage.max.migrate.sessions", "2", + "The max number of concurrent copy command execution sessions that an SSVM can handle", true, ConfigKey.Scope.Global); + /** * Returns a comma separated list of tags for the specified storage pool * @param poolId diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java index fe5d15606e76..d768734186e0 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java @@ -43,7 +43,6 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; -import org.apache.log4j.Logger; import com.cloud.host.HostVO; import com.cloud.host.Status; @@ -82,13 +81,11 @@ public class DataMigrationUtility { @Inject SnapshotDao snapshotDao; - private static final Logger s_logger = Logger.getLogger(DataMigrationUtility.class); - /** This function verifies if the given image store comprises of data objects that are not in either the "Ready" or * "Allocated" state - in such a case, if the migration policy is complete, the migration is terminated */ - private boolean filesReady(Long srcDataStoreId) { - String[] validStates = new String[]{"Ready", 
"Allocated"}; + private boolean filesReadyToMigrate(Long srcDataStoreId) { + String[] validStates = new String[]{"Ready", "Allocated", "Destroying", "Destroyed", "Failed"}; boolean isReady = true; List templates = templateDataStoreDao.listByStoreId(srcDataStoreId); for (TemplateDataStoreVO template : templates) { @@ -107,7 +104,7 @@ private boolean filesReady(Long srcDataStoreId) { protected void checkIfCompleteMigrationPossible(ImageStoreService.MigrationPolicy policy, Long srcDataStoreId) { if (policy == ImageStoreService.MigrationPolicy.COMPLETE) { - if (!filesReady(srcDataStoreId)) { + if (!filesReadyToMigrate(srcDataStoreId)) { throw new CloudRuntimeException("Complete migration failed as there are data objects which are not Ready"); } } @@ -147,9 +144,9 @@ public int compare(Map.Entry> e1, Map.Entry getSortedValidSourcesList(DataStore srcDataStore, Map, Long>> snapshotChains) { List files = new ArrayList<>(); - files.addAll(getAllValidTemplates(srcDataStore)); - files.addAll(getAllValidSnapshotsAndChains(srcDataStore, snapshotChains)); - files.addAll(getAllValidVolumes(srcDataStore)); + files.addAll(getAllReadyTemplates(srcDataStore)); + files.addAll(getAllReadySnapshotsAndChains(srcDataStore, snapshotChains)); + files.addAll(getAllReadyVolumes(srcDataStore)); files = sortFilesOnSize(files, snapshotChains); @@ -168,14 +165,13 @@ public int compare(DataObject o1, DataObject o2) { if (o2 instanceof SnapshotInfo) { size2 = snapshotChains.get(o2).second(); } - return (int) (size2 - size1); + return size2 > size1 ? 
1 : -1; } }); return files; } - // Gets list of all valid templates, i.e, templates in "Ready" state for migration - protected List getAllValidTemplates(DataStore srcDataStore) { + protected List getAllReadyTemplates(DataStore srcDataStore) { List files = new LinkedList<>(); List templates = templateDataStoreDao.listByStoreId(srcDataStore.getId()); @@ -192,7 +188,7 @@ protected List getAllValidTemplates(DataStore srcDataStore) { * for each parent snapshot and the cumulative size of the chain - this is done to ensure that all the snapshots in a chain * are migrated to the same datastore */ - protected List getAllValidSnapshotsAndChains(DataStore srcDataStore, Map, Long>> snapshotChains) { + protected List getAllReadySnapshotsAndChains(DataStore srcDataStore, Map, Long>> snapshotChains) { List files = new LinkedList<>(); List snapshots = snapshotDataStoreDao.listByStoreId(srcDataStore.getId(), DataStoreRole.Image); for (SnapshotDataStoreVO snapshot : snapshots) { @@ -219,7 +215,6 @@ protected List getAllValidSnapshotsAndChains(DataStore srcDataStore, return (List) (List) files; } - // Finds the cumulative file size for all data objects in the chain protected Long getSizeForChain(List chain) { Long size = 0L; for (SnapshotInfo snapshot : chain) { @@ -228,8 +223,8 @@ protected Long getSizeForChain(List chain) { return size; } - // Returns a list of volumes that are in "Ready" state - protected List getAllValidVolumes(DataStore srcDataStore) { + + protected List getAllReadyVolumes(DataStore srcDataStore) { List files = new LinkedList<>(); List volumes = volumeDataStoreDao.listByStoreId(srcDataStore.getId()); for (VolumeDataStoreVO volume : volumes) { diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index 6e5db3ce36c5..91a4d23b5361 100644 --- 
a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -56,14 +56,13 @@ import org.apache.commons.math3.stat.descriptive.moment.StandardDeviation; import org.apache.log4j.Logger; -import com.cloud.configuration.Config; import com.cloud.server.StatsCollector; import com.cloud.storage.DataStoreRole; import com.cloud.storage.SnapshotVO; +import com.cloud.storage.StorageManager; import com.cloud.storage.StorageService; import com.cloud.storage.StorageStats; import com.cloud.storage.dao.SnapshotDao; -import com.cloud.utils.NumbersUtil; import com.cloud.utils.Pair; import com.cloud.utils.StringUtils; import com.cloud.utils.component.ManagerBase; @@ -133,7 +132,7 @@ public boolean offer(T task) { @Override public boolean configure(String name, Map params) throws ConfigurationException { - numConcurrentCopyTasksPerSSVM = NumbersUtil.parseInt(configDao.getValue(Config.SecStorageMaxMigrateSessions.key()), 2); + numConcurrentCopyTasksPerSSVM = StorageManager.SecStorageMaxMigrateSessions.value(); return true; } @@ -144,7 +143,6 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto String message = null; migrationHelper.checkIfCompleteMigrationPossible(migrationPolicy, srcDataStoreId); - DataStore srcDatastore = dataStoreManager.getDataStore(srcDataStoreId, DataStoreRole.Image); Map, Long>> snapshotChains = new HashMap<>(); files = migrationHelper.getSortedValidSourcesList(srcDatastore, snapshotChains); @@ -152,17 +150,11 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto if (files.isEmpty()) { return new MigrationResponse("No files in Image store "+srcDatastore.getId()+ " to migrate", migrationPolicy.toString(), true); } - - // Create capacity class with free and total space, maybe id of ds too and use that as the value Map> storageCapacities = new Hashtable<>(); - 
for (Long storeId : destDatastores) { storageCapacities.put(storeId, new Pair<>(null, null)); } storageCapacities.put(srcDataStoreId, new Pair<>(null, null)); - - // If the migration policy is to completely migrate data from the given source Image Store, then set it's state - // to readonly if (migrationPolicy == MigrationPolicy.COMPLETE) { s_logger.debug("Setting source image store "+srcDatastore.getId()+ " to read-only"); storageService.updateImageStoreStatus(srcDataStoreId, true); @@ -172,8 +164,8 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto double meanstddev = getStandardDeviation(storageCapacities); double threshold = ImageStoreImbalanceThreshold.value(); MigrationResponse response = null; - - ThreadPoolExecutor executor = new ThreadPoolExecutor(numConcurrentCopyTasksPerSSVM , numConcurrentCopyTasksPerSSVM, 30, TimeUnit.MINUTES, new MigrateBlockingQueue<>(numConcurrentCopyTasksPerSSVM)); + ThreadPoolExecutor executor = new ThreadPoolExecutor(numConcurrentCopyTasksPerSSVM , numConcurrentCopyTasksPerSSVM, 30, + TimeUnit.MINUTES, new MigrateBlockingQueue<>(numConcurrentCopyTasksPerSSVM)); Date start = new Date(); if (meanstddev < threshold && migrationPolicy == MigrationPolicy.BALANCE) { s_logger.debug("mean std deviation of the image stores is below threshold, no migration required"); @@ -188,28 +180,14 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto chosenFileForMigration = files.remove(0); } - // Choose datastore with maximum free capacity as the destination datastore for migration storageCapacities = getStorageCapacities(storageCapacities); List orderedDS = migrationHelper.sortDataStores(storageCapacities); Long destDatastoreId = orderedDS.get(0); - // If there aren't anymore files available for migration or no valid Image stores available for migration - // end the migration process if (chosenFileForMigration == null || destDatastoreId == null || destDatastoreId == srcDatastore.getId()) { - if 
(destDatastoreId == srcDatastore.getId() && !files.isEmpty() ) { - if (migrationPolicy == MigrationPolicy.BALANCE) { - s_logger.debug("Migration completed : data stores have been balanced "); - message = "Image stores have been balanced"; - success = true; - } else { - message = "Files not completely migrated from "+ srcDatastore.getId() + - " If you want to continue using the Image Store, please change the read-only status using 'update imagestore' command"; - success = false; - } - } else { - message = "Migration completed"; - success = true; - } + Pair result = migrateCompleted(destDatastoreId, srcDatastore, files, migrationPolicy); + message = result.first(); + success = result.second(); break; } @@ -218,24 +196,8 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto continue; } - // If there is a benefit in migration of the chosen file to the destination store, then proceed with migration if (shouldMigrate(chosenFileForMigration, srcDatastore.getId(), destDatastoreId, storageCapacities, snapshotChains, migrationPolicy)) { - Long fileSize = migrationHelper.getFileSize(chosenFileForMigration, snapshotChains); - storageCapacities = assumeMigrate(storageCapacities, srcDatastore.getId(), destDatastoreId, fileSize); - long activeSsvms = migrationHelper.activeSSVMCount(srcDatastore); - long totalJobs = activeSsvms * numConcurrentCopyTasksPerSSVM; - // Increase thread pool size with increase in number of SSVMs - if ( totalJobs > executor.getCorePoolSize()) { - executor.setMaximumPoolSize((int) (totalJobs)); - executor.setCorePoolSize((int) (totalJobs)); - } - - MigrateDataTask task = new MigrateDataTask(chosenFileForMigration, srcDatastore, dataStoreManager.getDataStore(destDatastoreId, DataStoreRole.Image)); - if (chosenFileForMigration instanceof SnapshotInfo ) { - task.setSnapshotChains(snapshotChains); - } - futures.add((executor.submit(task))); - s_logger.debug("Migration of file " + chosenFileForMigration.getId() + " is initiated"); + 
migrateAway(chosenFileForMigration, storageCapacities, snapshotChains, srcDatastore, destDatastoreId, executor, futures); } else { if (migrationPolicy == MigrationPolicy.BALANCE) { message = "Migration completed and has successfully balanced the data objects among stores: " + StringUtils.join(storageCapacities.keySet(), ","); @@ -247,11 +209,50 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto } } Date end = new Date(); - // Migrate snapshots created during the migration process handleSnapshotMigration(srcDataStoreId, start, end, migrationPolicy, futures, storageCapacities, executor); return handleResponse(futures, migrationPolicy, message, success); } + protected Pair migrateCompleted(Long destDatastoreId, DataStore srcDatastore, List files, MigrationPolicy migrationPolicy) { + String message = ""; + boolean success = true; + if (destDatastoreId == srcDatastore.getId() && !files.isEmpty() ) { + if (migrationPolicy == MigrationPolicy.BALANCE) { + s_logger.debug("Migration completed : data stores have been balanced "); + message = "Image stores have been balanced"; + success = true; + } else { + message = "Files not completely migrated from "+ srcDatastore.getId() + + " If you want to continue using the Image Store, please change the read-only status using 'update imagestore' command"; + success = false; + } + } else { + message = "Migration completed"; + } + return new Pair(message, success); + } + + protected void migrateAway(DataObject chosenFileForMigration, Map> storageCapacities, + Map, Long>> snapshotChains, DataStore srcDatastore, Long destDatastoreId, ThreadPoolExecutor executor, + List>> futures) { + Long fileSize = migrationHelper.getFileSize(chosenFileForMigration, snapshotChains); + storageCapacities = assumeMigrate(storageCapacities, srcDatastore.getId(), destDatastoreId, fileSize); + long activeSsvms = migrationHelper.activeSSVMCount(srcDatastore); + long totalJobs = activeSsvms * numConcurrentCopyTasksPerSSVM; + // 
Increase thread pool size with increase in number of SSVMs + if ( totalJobs > executor.getCorePoolSize()) { + executor.setMaximumPoolSize((int) (totalJobs)); + executor.setCorePoolSize((int) (totalJobs)); + } + + MigrateDataTask task = new MigrateDataTask(chosenFileForMigration, srcDatastore, dataStoreManager.getDataStore(destDatastoreId, DataStoreRole.Image)); + if (chosenFileForMigration instanceof SnapshotInfo ) { + task.setSnapshotChains(snapshotChains); + } + futures.add((executor.submit(task))); + s_logger.debug("Migration of file " + chosenFileForMigration.getId() + " is initiated"); + } + private MigrationResponse handleResponse(List>> futures, MigrationPolicy migrationPolicy, String message, boolean success) { @@ -271,7 +272,7 @@ private MigrationResponse handleResponse(List>> futures, Map> storageCapacities, ThreadPoolExecutor executor) { DataStore srcDatastore = dataStoreManager.getDataStore(srcDataStoreId, DataStoreRole.Image); List snaps = snapshotDataStoreDao.findSnapshots(srcDataStoreId, start, end); diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java similarity index 93% rename from engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java rename to engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java index 553223d3c234..1505f9163a2d 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/StorageServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java @@ -36,6 +36,7 @@ import org.apache.cloudstack.framework.async.AsyncCompletionCallback; import org.apache.cloudstack.framework.async.AsyncRpcContext; import org.apache.cloudstack.storage.command.CopyCmdAnswer; +import 
org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.log4j.Logger; @@ -43,9 +44,9 @@ import com.cloud.secstorage.CommandExecLogDao; import com.cloud.utils.Pair; -public class StorageServiceImpl implements SecondaryStorageService { +public class SecondaryStorageServiceImpl implements SecondaryStorageService { - private static final Logger s_logger = Logger.getLogger(StorageServiceImpl.class); + private static final Logger s_logger = Logger.getLogger(SecondaryStorageServiceImpl.class); @Inject DataMotionService motionSrv; @@ -53,6 +54,8 @@ public class StorageServiceImpl implements SecondaryStorageService { CommandExecLogDao _cmdExecLogDao; @Inject TemplateDataStoreDao templateStoreDao; + @Inject + SnapshotDataStoreDao snapshotStoreDao; private class MigrateDataContext extends AsyncRpcContext { final DataObject srcData; @@ -116,7 +119,7 @@ public AsyncCallFuture migrateData(DataObject srcDataObject, D protected void migrateJob(AsyncCallFuture future, DataObject srcDataObject, DataObject destDataObject, DataStore destDatastore) throws ExecutionException, InterruptedException { MigrateDataContext context = new MigrateDataContext(null, future, srcDataObject, destDataObject, destDatastore); - AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); + AsyncCallbackDispatcher caller = AsyncCallbackDispatcher.create(this); caller.setCallback(caller.getTarget().migrateDataCallBack(null, null)).setContext(context); motionSrv.copyAsync(srcDataObject, destDataObject, caller); } @@ -124,7 +127,7 @@ protected void migrateJob(AsyncCallFuture future, DataObject s /** * Callback function to handle state change of source and destination data objects based on the success or failure of the migrate task */ - protected Void migrateDataCallBack(AsyncCallbackDispatcher callback, MigrateDataContext context) 
throws ExecutionException, InterruptedException { + protected Void migrateDataCallBack(AsyncCallbackDispatcher callback, MigrateDataContext context) throws ExecutionException, InterruptedException { DataObject srcData = context.srcData; DataObject destData = context.destData; CopyCommandResult result = callback.getResult(); diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java index 57148613ba58..01ff12638310 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/TemplateServiceImpl.java @@ -253,7 +253,6 @@ public void downloadBootstrapSysTemplate(DataStore store) { @Override public void handleSysTemplateDownload(HypervisorType hostHyper, Long dcId) { Set toBeDownloaded = new HashSet(); - //List stores = _storeMgr.getImageStoresByScope(new ZoneScope(dcId)); List stores = _storeMgr.getImageStoresByScopeExcludingReadOnly(new ZoneScope(dcId)); if (stores == null || stores.isEmpty()) { return; diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java index 38477908db08..1ca155cb7d9e 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/manager/ImageStoreProviderManagerImpl.java @@ -33,6 +33,8 @@ import org.apache.cloudstack.engine.subsystem.api.storage.ImageStoreProvider; import org.apache.cloudstack.engine.subsystem.api.storage.Scope; import org.apache.cloudstack.engine.subsystem.api.storage.ZoneScope; +import org.apache.cloudstack.framework.config.ConfigKey; 
+import org.apache.cloudstack.framework.config.Configurable; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; @@ -43,13 +45,12 @@ import org.apache.log4j.Logger; import org.springframework.stereotype.Component; -import com.cloud.configuration.Config; import com.cloud.server.StatsCollector; import com.cloud.storage.ScopeType; import com.cloud.storage.dao.VMTemplateDao; @Component -public class ImageStoreProviderManagerImpl implements ImageStoreProviderManager { +public class ImageStoreProviderManagerImpl implements ImageStoreProviderManager, Configurable { private static final Logger s_logger = Logger.getLogger(ImageStoreProviderManagerImpl.class); @Inject ImageStoreDao dataStoreDao; @@ -64,6 +65,9 @@ public class ImageStoreProviderManagerImpl implements ImageStoreProviderManager Map driverMaps; + static final ConfigKey ImageStoreAllocationAlgorithm = new ConfigKey("Advanced", String.class, "image.store.allocation.algorithm", "firstfitleastconsumed", + "'firstfitleastconsumed', 'random' : Order in which image stores will be considered for placing data objects (templates, volumes, snapshots)", true, ConfigKey.Scope.Global ); + @PostConstruct public void config() { driverMaps = new HashMap(); @@ -125,7 +129,7 @@ public List listImageStoresByScope(ZoneScope scope) { @Override public List listImageStoresByScopeExcludingReadOnly(ZoneScope scope) { - String allocationAlgorithm = configDao.getValue(Config.ImageStoreAllocationAlgorithm.key()); + String allocationAlgorithm = ImageStoreAllocationAlgorithm.value(); List stores = dataStoreDao.findByZone(scope, Boolean.FALSE); List imageStores = new ArrayList(); @@ -243,4 +247,14 @@ public List listImageStoresWithFreeCapacity(List imageStor } return stores; } + + @Override + public String getConfigComponentName() { + return ImageStoreProviderManager.class.getSimpleName(); + } + + @Override + public 
ConfigKey[] getConfigKeys() { + return new ConfigKey[] { ImageStoreAllocationAlgorithm }; + } } diff --git a/engine/storage/image/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-image-core-context.xml b/engine/storage/image/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-image-core-context.xml index ea1aea914136..805af26324bc 100644 --- a/engine/storage/image/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-image-core-context.xml +++ b/engine/storage/image/src/main/resources/META-INF/cloudstack/core/spring-engine-storage-image-core-context.xml @@ -34,8 +34,8 @@ - 0.5 2.6 2.7.0 + 3.6.1 diff --git a/server/pom.xml b/server/pom.xml index 730ef0c0c6b5..ff94b2822980 100644 --- a/server/pom.xml +++ b/server/pom.xml @@ -92,7 +92,7 @@ org.apache.commons commons-math3 - 3.6.1 + ${cs.commons-math3.version} org.apache.cloudstack diff --git a/server/src/main/java/com/cloud/configuration/Config.java b/server/src/main/java/com/cloud/configuration/Config.java index 769ff505aed7..c72eb498b1b8 100644 --- a/server/src/main/java/com/cloud/configuration/Config.java +++ b/server/src/main/java/com/cloud/configuration/Config.java @@ -905,14 +905,6 @@ public enum Config { "random", "'random', 'firstfit', 'userdispersing', 'userconcentratedpod_random', 'userconcentratedpod_firstfit', 'firstfitleastconsumed' : Order in which hosts within a cluster will be considered for VM/volume allocation.", null), - ImageStoreAllocationAlgorithm( - "Advanced", - ManagementServer.class, - String.class, - "image.store.allocation.algorithm", - "firstfitleastconsumed", - "firstfitleastconsumed','random' : Order in which hosts within a cluster will be considered for VM/volume allocation.", - null), VmDeploymentPlanner( "Advanced", ManagementServer.class, @@ -1817,10 +1809,7 @@ public enum Config { // StatsCollector StatsOutPutGraphiteHost("Advanced", ManagementServer.class, String.class, "stats.output.uri", "", "URI to additionally send StatsCollector 
statistics to", null), - SSVMPSK("Hidden", ManagementServer.class, String.class, "upload.post.secret.key", "", "PSK with SSVM", null), - - SecStorageMaxMigrateSessions( - "Advanced", AgentManager.class, Integer.class, "secstorage.max.migrate.sessions","2","The max number of concurrent copy command execution sessions that an SSVM can handle",null); + SSVMPSK("Hidden", ManagementServer.class, String.class, "upload.post.secret.key", "", "PSK with SSVM", null); private final String _category; private final Class _componentClass; diff --git a/server/src/main/java/com/cloud/storage/ImageStoreDetailsUtil.java b/server/src/main/java/com/cloud/storage/ImageStoreDetailsUtil.java index 4ed7962db97f..3e27ce6ab490 100755 --- a/server/src/main/java/com/cloud/storage/ImageStoreDetailsUtil.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreDetailsUtil.java @@ -30,6 +30,7 @@ import com.google.common.base.Preconditions; public class ImageStoreDetailsUtil { + @Inject protected ImageStoreDao imageStoreDao; @Inject diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index d11a2fddea7a..407d8530efcd 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -2510,7 +2510,8 @@ public ConfigKey[] getConfigKeys() { KvmStorageOnlineMigrationWait, KvmAutoConvergence, MaxNumberOfManagedClusteredFileSystems, - PRIMARY_STORAGE_DOWNLOAD_WAIT + PRIMARY_STORAGE_DOWNLOAD_WAIT, + SecStorageMaxMigrateSessions }; } diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java index bb17d1805769..ec8d4792eb9b 100644 --- 
a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java @@ -36,6 +36,7 @@ import com.cloud.resource.ResourceManager; import com.cloud.secstorage.CommandExecLogDao; import com.cloud.secstorage.CommandExecLogVO; +import com.cloud.storage.StorageManager; import com.cloud.storage.secondary.SecondaryStorageVmManager; import com.cloud.utils.DateUtil; import com.cloud.utils.NumbersUtil; @@ -85,7 +86,7 @@ public boolean configure(String name, Map params) throws Configu _maxExecutionTimeMs = nMaxExecutionMinutes * 60 * 1000; nextSpawnTime = currentTime + _maxExecutionTimeMs/2; - migrateCapPerSSVM = NumbersUtil.parseInt(_configDao.getValue(Config.SecStorageMaxMigrateSessions.key()), DEFAULT_MIGRATE_SS_VM_CAPACITY); + migrateCapPerSSVM = StorageManager.SecStorageMaxMigrateSessions.value(); hostSearch = _hostDao.createSearchBuilder(); hostSearch.and("dc", hostSearch.entity().getDataCenterId(), Op.EQ); @@ -151,49 +152,55 @@ public Pair scanPool(Long pool) { return new Pair(AfterScanAction.nop, null); } + alreadyRunning = _secStorageVmDao.getSecStorageVmListInStates(null, dataCenterId, State.Running, State.Migrating, State.Starting); List activeCmds = findActiveCommands(dataCenterId, cutTime); - // Find running copy / migrate commands running arranged in ascending order of their creation time i.e., oldest first List copyCmdsInPipeline = findAllActiveCopyCommands(dataCenterId, cutTime); - // Count of total hosts - Integer hostsCount = _hostDao.countAllByType(Host.Type.Routing); - // Maximum number of allowed SSVMs for migration task - Integer maxSsvms = (hostsCount < MaxNumberOfSsvmsForMigration.value()) ? 
hostsCount : MaxNumberOfSsvmsForMigration.value(); - int halfLimit = Math.round((float) (alreadyRunning.size() * migrateCapPerSSVM) / 2); - currentTime = DateUtil.currentGMTTime().getTime(); - - if (alreadyRunning.size() * _capacityPerSSVM - activeCmds.size() < _standbyCapacity) { - s_logger.info("secondary storage command execution standby capactiy low (running VMs: " + alreadyRunning.size() + ", active cmds: " + activeCmds.size() + - "), starting a new one"); - return new Pair(AfterScanAction.expand, SecondaryStorageVm.Role.commandExecutor); - } - // Scale the number of SSVMs if the number of Copy operations is greater than the number of SSVMs running and the copy operation has been in pipeline for - // more than half of the total time allocated for secondary storage operation - else if (!copyCmdsInPipeline.isEmpty() && copyCmdsInPipeline.size() >= halfLimit && - (((currentTime - copyCmdsInPipeline.get(halfLimit - 1).getCreated().getTime()) > _maxExecutionTimeMs/2 )) && - (currentTime > nextSpawnTime) && alreadyRunning.size() <= maxSsvms) { - nextSpawnTime = currentTime + _maxExecutionTimeMs/2; - s_logger.debug("scaling SSVM to handle migration tasks"); - return new Pair(AfterScanAction.expand, SecondaryStorageVm.Role.templateProcessor); - } + return scaleSSVMOnLoad(alreadyRunning, activeCmds, copyCmdsInPipeline); - // Scale down the number of SSVMs if the load on them has reduced - if ((copyCmdsInPipeline.size() < halfLimit && alreadyRunning.size() * _capacityPerSSVM - activeCmds.size() > _standbyCapacity) && alreadyRunning.size() > 1) { - Collections.reverse(alreadyRunning); - for(SecondaryStorageVmVO vm : alreadyRunning) { - long count = copyCmdsInPipeline.stream().map(cmd -> cmd.getInstanceId() == vm.getId()).count(); - count += activeCmds.stream().map(cmd -> cmd.getInstanceId() == vm.getId()).count(); - if (count == 0) { - destroySecStorageVm(vm.getId()); - break; - } - } - } } + return new Pair(AfterScanAction.nop, null); + } + private Pair 
scaleSSVMOnLoad(List alreadyRunning, List activeCmds, + List copyCmdsInPipeline) { + Integer hostsCount = _hostDao.countAllByType(Host.Type.Routing); + Integer maxSsvms = (hostsCount < MaxNumberOfSsvmsForMigration.value()) ? hostsCount : MaxNumberOfSsvmsForMigration.value(); + int halfLimit = Math.round((float) (alreadyRunning.size() * migrateCapPerSSVM) / 2); + currentTime = DateUtil.currentGMTTime().getTime(); + if (alreadyRunning.size() * _capacityPerSSVM - activeCmds.size() < _standbyCapacity) { + s_logger.info("secondary storage command execution standby capacity low (running VMs: " + alreadyRunning.size() + ", active cmds: " + activeCmds.size() + + "), starting a new one"); + return new Pair(AfterScanAction.expand, SecondaryStorageVm.Role.commandExecutor); + } + else if (!copyCmdsInPipeline.isEmpty() && copyCmdsInPipeline.size() >= halfLimit && + ((Math.abs(currentTime - copyCmdsInPipeline.get(halfLimit - 1).getCreated().getTime()) > _maxExecutionTimeMs/2 )) && + (currentTime > nextSpawnTime) && alreadyRunning.size() <= maxSsvms) { + nextSpawnTime = currentTime + _maxExecutionTimeMs/2; + s_logger.debug("scaling SSVM to handle migration tasks"); + return new Pair(AfterScanAction.expand, SecondaryStorageVm.Role.templateProcessor); + + } + scaleDownSSVMOnLoad(alreadyRunning, activeCmds, copyCmdsInPipeline); return new Pair(AfterScanAction.nop, null); } + private void scaleDownSSVMOnLoad(List alreadyRunning, List activeCmds, + List copyCmdsInPipeline) { + int halfLimit = Math.round((float) (alreadyRunning.size() * migrateCapPerSSVM) / 2); + if ((copyCmdsInPipeline.size() < halfLimit && alreadyRunning.size() * _capacityPerSSVM - activeCmds.size() > _standbyCapacity) && alreadyRunning.size() > 1) { + Collections.reverse(alreadyRunning); + for(SecondaryStorageVmVO vm : alreadyRunning) { + long count = copyCmdsInPipeline.stream().map(cmd -> cmd.getInstanceId() == vm.getId()).count(); + count += activeCmds.stream().map(cmd -> cmd.getInstanceId() == 
vm.getId()).count(); + if (count == 0) { + destroySecStorageVm(vm.getId()); + break; + } + } + } + } + @Override public Pair assignSecStorageVm(long zoneId, Command cmd) { diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index e00b7a08960b..27628a09eb7d 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -1352,8 +1352,8 @@ protected Answer copyFromNfsToNfs(CopyCommand cmd) { } return new CopyCmdAnswer(retObj); } catch (Exception e) { - s_logger.error("failed to copy file" + srcData.getPath(), e); - return new CopyCmdAnswer("failed to copy file" + srcData.getPath() + e.toString()); + s_logger.error("failed to copy file" + srcData.getPath(), e); + return new CopyCmdAnswer("failed to copy file" + srcData.getPath() + e.toString()); } } From 141635d6386da022a1f081475de3aaedbb1304be Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 19 May 2020 17:54:55 +0530 Subject: [PATCH 12/40] updated comment --- .../engine/orchestration/DataMigrationUtility.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java index d768734186e0..ec8133ade349 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java @@ -81,8 +81,9 @@ public class DataMigrationUtility { @Inject SnapshotDao 
snapshotDao; - /** This function verifies if the given image store comprises of data objects that are not in either the "Ready" or - * "Allocated" state - in such a case, if the migration policy is complete, the migration is terminated + /** This function verifies if the given image store comprises of data objects that are not in the following states" + * "Ready" "Allocated", "Destroying", "Destroyed", "Failed" - in such a case, if the migration policy is complete, + * the migration is terminated */ private boolean filesReadyToMigrate(Long srcDataStoreId) { String[] validStates = new String[]{"Ready", "Allocated", "Destroying", "Destroyed", "Failed"}; From 8a2208e5c0b0482b24a6e45557468e7fecde4782 Mon Sep 17 00:00:00 2001 From: dahn Date: Wed, 20 May 2020 08:49:08 +0200 Subject: [PATCH 13/40] textual change in javadoc --- .../engine/orchestration/DataMigrationUtility.java | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java index ec8133ade349..05123b247c1e 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java @@ -81,9 +81,10 @@ public class DataMigrationUtility { @Inject SnapshotDao snapshotDao; - /** This function verifies if the given image store comprises of data objects that are not in the following states" - * "Ready" "Allocated", "Destroying", "Destroyed", "Failed" - in such a case, if the migration policy is complete, - * the migration is terminated + /** + * This function verifies if the given image store contains data objects that are not in any of the following states: + * "Ready" "Allocated", "Destroying", "Destroyed", "Failed". 
If this is the case, and if the migration policy is complete, + * the migration is terminated. */ private boolean filesReadyToMigrate(Long srcDataStoreId) { String[] validStates = new String[]{"Ready", "Allocated", "Destroying", "Destroyed", "Failed"}; From 5b4c2414fc108caf3574a693d4b62927c4af811a Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 19 May 2020 17:54:55 +0530 Subject: [PATCH 14/40] updated comment --- .../orchestration/DataMigrationUtility.java | 5 +++-- .../java/com/cloud/configuration/Config.java | 17 ++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java index d768734186e0..ec8133ade349 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/DataMigrationUtility.java @@ -81,8 +81,9 @@ public class DataMigrationUtility { @Inject SnapshotDao snapshotDao; - /** This function verifies if the given image store comprises of data objects that are not in either the "Ready" or - * "Allocated" state - in such a case, if the migration policy is complete, the migration is terminated + /** This function verifies if the given image store comprises of data objects that are not in the following states" + * "Ready" "Allocated", "Destroying", "Destroyed", "Failed" - in such a case, if the migration policy is complete, + * the migration is terminated */ private boolean filesReadyToMigrate(Long srcDataStoreId) { String[] validStates = new String[]{"Ready", "Allocated", "Destroying", "Destroyed", "Failed"}; diff --git a/server/src/main/java/com/cloud/configuration/Config.java b/server/src/main/java/com/cloud/configuration/Config.java index c72eb498b1b8..3daf720138c1 100644 --- 
a/server/src/main/java/com/cloud/configuration/Config.java +++ b/server/src/main/java/com/cloud/configuration/Config.java @@ -16,15 +16,6 @@ // under the License. package com.cloud.configuration; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.StringTokenizer; - -import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; -import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; -import org.apache.cloudstack.framework.config.ConfigKey; - import com.cloud.agent.AgentManager; import com.cloud.consoleproxy.ConsoleProxyManager; import com.cloud.ha.HighAvailabilityManager; @@ -38,6 +29,14 @@ import com.cloud.template.TemplateManager; import com.cloud.vm.UserVmManager; import com.cloud.vm.snapshot.VMSnapshotManager; +import org.apache.cloudstack.engine.orchestration.service.NetworkOrchestrationService; +import org.apache.cloudstack.engine.subsystem.api.storage.StoragePoolAllocator; +import org.apache.cloudstack.framework.config.ConfigKey; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.StringTokenizer; /** * @deprecated use the more dynamic ConfigKey From cb1e5baf7c8933f4295e8bc8056da2a12c6442bc Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 29 May 2020 18:35:12 +0530 Subject: [PATCH 15/40] refined error message --- .../main/java/com/cloud/storage/ImageStoreServiceImpl.java | 7 ++++--- .../storage/resource/NfsSecondaryStorageResource.java | 6 +++++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java index bcddd67c0eed..4e1673c2abe1 100644 --- a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java @@ -30,7 +30,6 @@ import org.apache.cloudstack.framework.config.ConfigKey; import 
org.apache.cloudstack.framework.jobs.AsyncJobManager; import org.apache.cloudstack.storage.ImageStoreService; -import org.apache.cloudstack.storage.ImageStoreService.MigrationPolicy; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.commons.lang3.EnumUtils; @@ -69,6 +68,7 @@ public boolean configure(String name, Map params) throws Configu @Override public MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd) { Long srcImgStoreId = cmd.getId(); + String errorMessage = ""; ImageStoreVO srcImageVO = imageStoreDao.findById(srcImgStoreId); List destImgStoreIds = cmd.getMigrateTo(); String migrationType = cmd.getMigrationType(); @@ -106,14 +106,15 @@ public MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd) { continue; } if (imageStoreDao.findById(id).isReadonly()) { - s_logger.warn("Secondary storage: "+ id + " cannot be considered for migration as has read-only permission, Skipping it..."); + errorMessage = "Secondary storage: "+ id + " cannot be considered for migration as has read-only permission, Skipping it... "; + s_logger.warn(errorMessage); continue; } destDatastores.add(id); } if (destDatastores.size() < 1) { - throw new CloudRuntimeException("Invalid destination image store(s) provided. Terminating Migration of data"); + throw new CloudRuntimeException(errorMessage + "No destination store(s) available to migrate. 
Terminating Migration of data"); } if (isMigrateJobRunning()){ diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index 5341294cceb6..c52ac68e3f9b 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -1346,7 +1346,11 @@ protected Answer copyFromNfsToNfs(CopyCommand cmd) { retObj = newTemplate; } else if (destData.getObjectType() == DataObjectType.VOLUME) { VolumeObjectTO newVol = new VolumeObjectTO(); - newVol.setPath(destData.getPath() + File.separator + srcFile.getName()); + if (srcFile.isFile()) { + newVol.setPath(destData.getPath() + File.separator + srcFile.getName()); + } else { + newVol.setPath(destData.getPath()); + } newVol.setSize(srcFile.length()); retObj = newVol; } else if (destData.getObjectType() == DataObjectType.SNAPSHOT) { From b70fc1de84c97404304dc51e0eeef9bc1dc35a5e Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Thu, 18 Jun 2020 15:50:20 +0530 Subject: [PATCH 16/40] Adhere to readonly flag when allzones option selected --- .../cloudstack/storage/datastore/db/ImageStoreDaoImpl.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java index 0350b2907473..6ecac5ed8094 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/ImageStoreDaoImpl.java @@ -80,14 +80,14 @@ public List findByProvider(String provider) { 
public List findByZone(ZoneScope scope, Boolean readonly) { SearchCriteria sc = createSearchCriteria(); sc.addAnd("role", SearchCriteria.Op.EQ, DataStoreRole.Image); + if (readonly != null) { + sc.addAnd("readonly", SearchCriteria.Op.EQ, readonly); + } if (scope.getScopeId() != null) { SearchCriteria scc = createSearchCriteria(); scc.addOr("scope", SearchCriteria.Op.EQ, ScopeType.REGION); scc.addOr("dcId", SearchCriteria.Op.EQ, scope.getScopeId()); sc.addAnd("scope", SearchCriteria.Op.SC, scc); - if (readonly != null) { - sc.addAnd("readonly", SearchCriteria.Op.EQ, readonly); - } } // we should return all image stores if cross-zone scope is passed // (scopeId = null) From cc3ce75dd851210c8e0d9ecd6f588f4587ef33f5 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Thu, 18 Jun 2020 18:29:32 +0530 Subject: [PATCH 17/40] Added event details --- api/src/main/java/com/cloud/event/EventTypes.java | 5 +++++ .../admin/storage/MigrateSecondaryStorageDataCmd.java | 1 + .../main/java/com/cloud/storage/ImageStoreServiceImpl.java | 3 +++ 3 files changed, 9 insertions(+) diff --git a/api/src/main/java/com/cloud/event/EventTypes.java b/api/src/main/java/com/cloud/event/EventTypes.java index 14da293e17dc..9178e832ad42 100644 --- a/api/src/main/java/com/cloud/event/EventTypes.java +++ b/api/src/main/java/com/cloud/event/EventTypes.java @@ -70,6 +70,7 @@ import com.cloud.server.ResourceTag; import com.cloud.storage.GuestOS; import com.cloud.storage.GuestOSHypervisor; +import com.cloud.storage.ImageStore; import com.cloud.storage.Snapshot; import com.cloud.storage.StoragePool; import com.cloud.storage.Volume; @@ -320,6 +321,8 @@ public class EventTypes { public static final String EVENT_STORAGE_IP_RANGE_DELETE = "STORAGE.IP.RANGE.DELETE"; public static final String EVENT_STORAGE_IP_RANGE_UPDATE = "STORAGE.IP.RANGE.UPDATE"; + public static final String EVENT_IMAGE_STORE_DATA_MIGRATE = "IMAGE.STORE.MIGRATE.DATA"; + // Configuration Table public static final String 
EVENT_CONFIGURATION_VALUE_EDIT = "CONFIGURATION.VALUE.EDIT"; @@ -1008,6 +1011,8 @@ public class EventTypes { entityEventDetails.put(EVENT_POD_ROLLING_MAINTENANCE, PodResponse.class); entityEventDetails.put(EVENT_CLUSTER_ROLLING_MAINTENANCE, ClusterResponse.class); entityEventDetails.put(EVENT_HOST_ROLLING_MAINTENANCE, HostResponse.class); + + entityEventDetails.put(EVENT_IMAGE_STORE_DATA_MIGRATE, ImageStore.class); } public static String getEntityForEvent(String eventName) { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java index 233f1e8b1684..6c169ff0887a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java @@ -101,6 +101,7 @@ public void execute() { MigrationResponse response = _imageStoreService.migrateData(this); response.setObjectName("imagestore"); this.setResponseObject(response); + CallContext.current().setEventDetails(response.getMessage()); } @Override diff --git a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java index aff0e8d1c2fd..19f2877354a0 100644 --- a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java @@ -35,6 +35,8 @@ import org.apache.commons.lang3.EnumUtils; import org.apache.log4j.Logger; +import com.cloud.event.ActionEvent; +import com.cloud.event.EventTypes; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.exception.CloudRuntimeException; @@ -66,6 +68,7 @@ public boolean configure(String name, Map params) throws Configu } @Override + @ActionEvent(eventType = EventTypes.EVENT_IMAGE_STORE_DATA_MIGRATE, eventDescription = 
"migrating Image store data", async = true) public MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd) { Long srcImgStoreId = cmd.getId(); ImageStoreVO srcImageVO = imageStoreDao.findById(srcImgStoreId); From 861c6d5b39ee1580f4f9e6260688c8fef68f383b Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 22 Jun 2020 15:16:52 +0530 Subject: [PATCH 18/40] event logs modified + vmware changes --- .../MigrateSecondaryStorageDataCmd.java | 5 ++--- .../image/BaseImageStoreDriverImpl.java | 22 ++++++++++++++++--- .../cloud/storage/ImageStoreServiceImpl.java | 11 +++++++--- .../resource/NfsSecondaryStorageResource.java | 2 +- 4 files changed, 30 insertions(+), 10 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java index 6c169ff0887a..9abbecfcd8e7 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/storage/MigrateSecondaryStorageDataCmd.java @@ -30,7 +30,6 @@ import org.apache.log4j.Logger; import com.cloud.event.EventTypes; -import com.cloud.utils.StringUtils; @APICommand(name = MigrateSecondaryStorageDataCmd.APINAME, description = "migrates data objects from one secondary storage to destination image store(s)", @@ -88,12 +87,12 @@ public String getMigrationType() { @Override public String getEventType() { - return EventTypes.EVENT_FILE_MIGRATE; + return EventTypes.EVENT_IMAGE_STORE_DATA_MIGRATE; } @Override public String getEventDescription() { - return "Attempting to migrate files/data objects " + "from : " + this.getId() + " to: " + StringUtils.join(getMigrateTo(), ","); + return "Attempting to migrate files/data objects "; } @Override diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java 
b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java index fc2558495ecd..baef5dab46b0 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java @@ -54,6 +54,7 @@ import org.apache.commons.collections.CollectionUtils; import org.apache.log4j.Logger; +import com.cloud.agent.AgentManager; import com.cloud.agent.api.Answer; import com.cloud.agent.api.storage.CreateDatadiskTemplateCommand; import com.cloud.agent.api.storage.DownloadAnswer; @@ -66,6 +67,8 @@ import com.cloud.agent.api.to.NfsTO; import com.cloud.alert.AlertManager; import com.cloud.configuration.Config; +import com.cloud.exception.AgentUnavailableException; +import com.cloud.exception.OperationTimedoutException; import com.cloud.host.dao.HostDao; import com.cloud.secstorage.CommandExecLogDao; import com.cloud.secstorage.CommandExecLogVO; @@ -128,6 +131,8 @@ public abstract class BaseImageStoreDriverImpl implements ImageStoreDriver { StorageManager storageMgr; @Inject protected SecondaryStorageVmDao _secStorageVmDao; + @Inject + AgentManager agentMgr; protected String _proxy = null; @@ -400,9 +405,20 @@ private Answer sendToLeastBusyEndpoint(List eps, CopyCommand cmd) { } CommandExecLogVO execLog = new CommandExecLogVO(endPoint.getId(), _secStorageVmDao.findByInstanceName(hostDao.findById(endPoint.getId()).getName()).getId(), cmd.getClass().getSimpleName(), 1); Long cmdExecId = _cmdExecLogDao.persist(execLog).getId(); - answer = endPoint.sendMessage(cmd); - answer.setContextParam("cmd", cmdExecId.toString()); - return answer; + //answer = endPoint.sendMessage(cmd); + String errMsg = null; + try { + answer = agentMgr.send(endPoint.getId(), cmd); + answer.setContextParam("cmd", cmdExecId.toString()); + return answer; + } catch (AgentUnavailableException e) { + errMsg = e.toString(); + s_logger.debug("Failed to send 
command, due to Agent:" + endPoint.getId() + ", " + e.toString()); + } catch (OperationTimedoutException e) { + errMsg = e.toString(); + s_logger.debug("Failed to send command, due to Agent:" + endPoint.getId() + ", " + e.toString()); + } + throw new CloudRuntimeException("Failed to send command, due to Agent:" + endPoint.getId() + ", " + errMsg); } @Override diff --git a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java index 19f2877354a0..6b82117e2a8b 100644 --- a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java @@ -26,6 +26,7 @@ import org.apache.cloudstack.api.command.admin.storage.MigrateSecondaryStorageDataCmd; import org.apache.cloudstack.api.response.MigrationResponse; +import org.apache.cloudstack.context.CallContext; import org.apache.cloudstack.engine.orchestration.service.StorageOrchestrationService; import org.apache.cloudstack.framework.config.ConfigKey; import org.apache.cloudstack.framework.jobs.AsyncJobManager; @@ -73,6 +74,7 @@ public MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd) { Long srcImgStoreId = cmd.getId(); ImageStoreVO srcImageVO = imageStoreDao.findById(srcImgStoreId); List destImgStoreIds = cmd.getMigrateTo(); + List imagestores = new ArrayList(); String migrationType = cmd.getMigrationType(); // default policy is complete @@ -90,7 +92,7 @@ public MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd) { if (srcImageVO == null) { throw new CloudRuntimeException("Cannot find secondary storage with id: " + srcImgStoreId); } - + imagestores.add(srcImageVO.getName()); if (srcImageVO.getRole() != DataStoreRole.Image) { throw new CloudRuntimeException("Secondary storage is not of Image Role"); } @@ -103,15 +105,17 @@ public MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd) { // Validate all the Ids correspond to valid Image 
stores List destDatastores = new ArrayList<>(); for (Long id : destImgStoreIds) { - if (imageStoreDao.findById(id) == null) { + ImageStoreVO store = imageStoreDao.findById(id); + if (store == null) { s_logger.warn("Secondary storage with id: " + id + "is not found. Skipping it..."); continue; } - if (imageStoreDao.findById(id).isReadonly()) { + if (store.isReadonly()) { s_logger.warn("Secondary storage: "+ id + " cannot be considered for migration as has read-only permission, Skipping it... "); continue; } destDatastores.add(id); + imagestores.add(store.getName()); } if (destDatastores.size() < 1) { @@ -124,6 +128,7 @@ public MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd) { return new MigrationResponse(message, policy.toString(), false); } + CallContext.current().setEventDetails("Migrating files/data objects " + "from : " + imagestores.get(0) + " to: " + imagestores.subList(1, imagestores.size())); return stgService.migrateData(srcImgStoreId, destDatastores, policy); } diff --git a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index c52ac68e3f9b..785af982d8f8 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -1305,7 +1305,7 @@ protected Answer copyFromNfsToNfs(CopyCommand cmd) { } if (srcData instanceof TemplateObjectTO || srcData instanceof VolumeObjectTO) { File srcDir = null; - if (srcFile.isFile()) { + if (srcFile.isFile() || srcFile.getName().contains(".")) { srcDir = new File(srcFile.getParent()); } File destDir = null; From 4270352bd67309b0e9cda946121506615ecc0359 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 23 Jun 2020 20:26:10 
+0530 Subject: [PATCH 19/40] Added check for only image store - more restrictive check --- .../cloudstack/storage/image/BaseImageStoreDriverImpl.java | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java index baef5dab46b0..2606e3e37f70 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java @@ -72,6 +72,7 @@ import com.cloud.host.dao.HostDao; import com.cloud.secstorage.CommandExecLogDao; import com.cloud.secstorage.CommandExecLogVO; +import com.cloud.storage.DataStoreRole; import com.cloud.storage.StorageManager; import com.cloud.storage.TemplateOVFPropertyVO; import com.cloud.storage.Upload; @@ -405,7 +406,6 @@ private Answer sendToLeastBusyEndpoint(List eps, CopyCommand cmd) { } CommandExecLogVO execLog = new CommandExecLogVO(endPoint.getId(), _secStorageVmDao.findByInstanceName(hostDao.findById(endPoint.getId()).getName()).getId(), cmd.getClass().getSimpleName(), 1); Long cmdExecId = _cmdExecLogDao.persist(execLog).getId(); - //answer = endPoint.sendMessage(cmd); String errMsg = null; try { answer = agentMgr.send(endPoint.getId(), cmd); @@ -423,7 +423,10 @@ private Answer sendToLeastBusyEndpoint(List eps, CopyCommand cmd) { @Override public boolean canCopy(DataObject srcData, DataObject destData) { - if (srcData.getDataStore().getTO() instanceof NfsTO && destData.getDataStore().getTO() instanceof NfsTO) { + DataStore srcStore = srcData.getDataStore(); + DataStore destStore = destData.getDataStore(); + if ((srcData.getDataStore().getTO() instanceof NfsTO && destData.getDataStore().getTO() instanceof NfsTO) && + (srcStore.getRole() == DataStoreRole.Image && destStore.getRole() == DataStoreRole.Image)) { 
return true; } return false; From 1515108811acd7a0213a207868c14beab2076357 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 23 Jun 2020 20:26:10 +0530 Subject: [PATCH 20/40] Added check for only image store - more restrictive check --- .../engine/orchestration/StorageOrchestrator.java | 5 +++-- .../cloudstack/storage/image/BaseImageStoreDriverImpl.java | 7 +++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index 9b2efc8f5546..c27debfc10fd 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -197,7 +197,7 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto } if (shouldMigrate(chosenFileForMigration, srcDatastore.getId(), destDatastoreId, storageCapacities, snapshotChains, migrationPolicy)) { - migrateAway(chosenFileForMigration, storageCapacities, snapshotChains, srcDatastore, destDatastoreId, executor, futures); + storageCapacities = migrateAway(chosenFileForMigration, storageCapacities, snapshotChains, srcDatastore, destDatastoreId, executor, futures); } else { if (migrationPolicy == MigrationPolicy.BALANCE) { message = "Migration completed and has successfully balanced the data objects among stores: " + StringUtils.join(storageCapacities.keySet(), ","); @@ -232,7 +232,7 @@ protected Pair migrateCompleted(Long destDatastoreId, DataStore return new Pair(message, success); } - protected void migrateAway(DataObject chosenFileForMigration, Map> storageCapacities, + protected Map> migrateAway(DataObject chosenFileForMigration, Map> storageCapacities, Map, Long>> snapshotChains, DataStore srcDatastore, Long destDatastoreId, ThreadPoolExecutor 
executor, List>> futures) { Long fileSize = migrationHelper.getFileSize(chosenFileForMigration, snapshotChains); @@ -251,6 +251,7 @@ protected void migrateAway(DataObject chosenFileForMigration, Map eps, CopyCommand cmd) { } CommandExecLogVO execLog = new CommandExecLogVO(endPoint.getId(), _secStorageVmDao.findByInstanceName(hostDao.findById(endPoint.getId()).getName()).getId(), cmd.getClass().getSimpleName(), 1); Long cmdExecId = _cmdExecLogDao.persist(execLog).getId(); - //answer = endPoint.sendMessage(cmd); String errMsg = null; try { answer = agentMgr.send(endPoint.getId(), cmd); @@ -423,7 +423,10 @@ private Answer sendToLeastBusyEndpoint(List eps, CopyCommand cmd) { @Override public boolean canCopy(DataObject srcData, DataObject destData) { - if (srcData.getDataStore().getTO() instanceof NfsTO && destData.getDataStore().getTO() instanceof NfsTO) { + DataStore srcStore = srcData.getDataStore(); + DataStore destStore = destData.getDataStore(); + if ((srcData.getDataStore().getTO() instanceof NfsTO && destData.getDataStore().getTO() instanceof NfsTO) && + (srcStore.getRole() == DataStoreRole.Image && destStore.getRole() == DataStoreRole.Image)) { return true; } return false; From 15486b30b87ef933a521ccf44016dc9d3f623776 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 24 Jun 2020 14:59:20 +0530 Subject: [PATCH 21/40] Added detailed logs --- .../engine/orchestration/StorageOrchestrator.java | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index c27debfc10fd..13970cd18f4e 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -64,7 +64,6 @@ import 
com.cloud.storage.StorageStats; import com.cloud.storage.dao.SnapshotDao; import com.cloud.utils.Pair; -import com.cloud.utils.StringUtils; import com.cloud.utils.component.ManagerBase; import com.cloud.utils.exception.CloudRuntimeException; @@ -200,11 +199,10 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto storageCapacities = migrateAway(chosenFileForMigration, storageCapacities, snapshotChains, srcDatastore, destDatastoreId, executor, futures); } else { if (migrationPolicy == MigrationPolicy.BALANCE) { - message = "Migration completed and has successfully balanced the data objects among stores: " + StringUtils.join(storageCapacities.keySet(), ","); - } else { - message = "Complete migration failed. Please set the source Image store to read-write mode if you want to continue using it"; - success = false; + continue; } + message = "Complete migration failed. Please set the source Image store to read-write mode if you want to continue using it"; + success = false; break; } } @@ -219,7 +217,10 @@ protected Pair migrateCompleted(Long destDatastoreId, DataStore if (destDatastoreId == srcDatastore.getId() && !files.isEmpty()) { if (migrationPolicy == MigrationPolicy.BALANCE) { s_logger.debug("Migration completed : data stores have been balanced "); - message = "Image stores have been balanced"; + if (destDatastoreId == srcDatastore.getId()) { + message = "Seems like source datastore has more free capacity than the destination(s)"; + } + message += "Image stores have been attempted to be balanced"; success = true; } else { message = "Files not completely migrated from "+ srcDatastore.getId() + ". 
Datastore (source): " + srcDatastore.getId() + "has equal or more free space than destination."+ From 8f2b224aebd1c7f3592055e1200a7661460f4371 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 29 Jun 2020 17:50:55 +0530 Subject: [PATCH 22/40] handle case for tests --- .../main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java index 8f509de93c7b..75d63a920fd2 100644 --- a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java @@ -152,6 +152,7 @@ public TemplateResponse newTemplateResponse(ResponseView view, TemplateJoinVO te for (TemplateDataStoreVO templateInStore : templatesInStore) { downloadDetailInImageStores = new HashMap<>(); ImageStoreVO store = dataStoreDao.findById(templateInStore.getDataStoreId()); + // check for valid store String name = store != null ? 
store.getName() : ""; downloadDetailInImageStores.put("datastore", name); downloadDetailInImageStores.put("dowloadPercent", Integer.toString(templateInStore.getDownloadPercent())); From 79d0d8bc111930a8bbfd18093ce999fb31e7f865 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Mon, 29 Jun 2020 19:53:23 +0530 Subject: [PATCH 23/40] template registration failure marvin --- .../cloud/api/query/dao/TemplateJoinDaoImpl.java | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java index 75d63a920fd2..16c99b7e3081 100644 --- a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java @@ -151,13 +151,14 @@ public TemplateResponse newTemplateResponse(ResponseView view, TemplateJoinVO te HashMap downloadDetailInImageStores = null; for (TemplateDataStoreVO templateInStore : templatesInStore) { downloadDetailInImageStores = new HashMap<>(); - ImageStoreVO store = dataStoreDao.findById(templateInStore.getDataStoreId()); - // check for valid store - String name = store != null ? store.getName() : ""; - downloadDetailInImageStores.put("datastore", name); - downloadDetailInImageStores.put("dowloadPercent", Integer.toString(templateInStore.getDownloadPercent())); - downloadDetailInImageStores.put("dowloadState", (templateInStore.getDownloadState() != null ? templateInStore.getDownloadState().toString() : "")); - dowloadProgressDetails.add(downloadDetailInImageStores); + if (templateInStore != null) { + ImageStoreVO store = dataStoreDao.findById(templateInStore.getDataStoreId()); + String name = store != null ? 
store.getName() : ""; + downloadDetailInImageStores.put("datastore", name); + downloadDetailInImageStores.put("dowloadPercent", Integer.toString(templateInStore.getDownloadPercent())); + downloadDetailInImageStores.put("dowloadState", (templateInStore.getDownloadState() != null ? templateInStore.getDownloadState().toString() : "")); + dowloadProgressDetails.add(downloadDetailInImageStores); + } } TemplateResponse templateResponse = new TemplateResponse(); templateResponse.setDownloadProgress(dowloadProgressDetails); From 3ef207ae6b0d8dbda03874313c3cb87d5a88b705 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 30 Jun 2020 15:17:07 +0530 Subject: [PATCH 24/40] Fixed marvin failures --- .../api/response/TemplateResponse.java | 4 ++-- .../datastore/db/TemplateDataStoreDao.java | 2 ++ .../image/db/TemplateDataStoreDaoImpl.java | 10 ++++++++++ .../api/query/dao/TemplateJoinDaoImpl.java | 19 +++++++------------ 4 files changed, 21 insertions(+), 14 deletions(-) diff --git a/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java index 607098b5f3ed..094fe2aa5660 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/TemplateResponse.java @@ -176,7 +176,7 @@ public class TemplateResponse extends BaseResponseWithTagInformation implements @SerializedName(ApiConstants.DOWNLOAD_DETAILS) @Param(description = "Lists the download progress of a template across all secondary storages") - private List downloadDetails; + private List> downloadDetails; @SerializedName(ApiConstants.BITS) @Param(description = "the processor bit size", since = "4.10") @@ -260,7 +260,7 @@ public void setPublic(boolean isPublic) { this.isPublic = isPublic; } - public void setDownloadProgress(List downloadDetails) { + public void setDownloadProgress(List> downloadDetails) { this.downloadDetails = downloadDetails; } 
diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreDao.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreDao.java index a6e609e7d870..fc695f476779 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreDao.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/TemplateDataStoreDao.java @@ -66,6 +66,8 @@ public interface TemplateDataStoreDao extends GenericDao listByTemplate(long templateId); + List listByTemplateNotBypassed(long templateId); + TemplateDataStoreVO findByTemplateZoneReady(long templateId, Long zoneId); void duplicateCacheRecordsOnRegionStore(long storeId); diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java index 2372e8444cc5..5a0e4eeceede 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/db/TemplateDataStoreDaoImpl.java @@ -97,6 +97,7 @@ public boolean configure(String name, Map params) throws Configu templateSearch = createSearchBuilder(); templateSearch.and("template_id", templateSearch.entity().getTemplateId(), SearchCriteria.Op.EQ); + templateSearch.and("download_state", templateSearch.entity().getDownloadState(), SearchCriteria.Op.NEQ); templateSearch.and("destroyed", templateSearch.entity().getDestroyed(), SearchCriteria.Op.EQ); templateSearch.done(); @@ -418,6 +419,15 @@ public List listByTemplate(long templateId) { return search(sc, null); } + @Override + public List listByTemplateNotBypassed(long templateId) { + SearchCriteria sc = templateSearch.create(); + sc.setParameters("template_id", templateId); + sc.setParameters("download_state", Status.BYPASSED); + sc.setParameters("destroyed", 
false); + return search(sc, null); + } + @Override public TemplateDataStoreVO findByTemplateZone(long templateId, Long zoneId, DataStoreRole role) { // get all elgible image stores diff --git a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java index 16c99b7e3081..2d7192b5276a 100644 --- a/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java +++ b/server/src/main/java/com/cloud/api/query/dao/TemplateJoinDaoImpl.java @@ -33,7 +33,6 @@ import org.apache.cloudstack.engine.subsystem.api.storage.TemplateState; import org.apache.cloudstack.framework.config.dao.ConfigurationDao; import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; -import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.utils.security.DigestHelper; @@ -146,22 +145,18 @@ private String getTemplateStatus(TemplateJoinVO template) { @Override public TemplateResponse newTemplateResponse(ResponseView view, TemplateJoinVO template) { - List templatesInStore = _templateStoreDao.listByTemplate(template.getId()); - List dowloadProgressDetails = new ArrayList(); + List templatesInStore = _templateStoreDao.listByTemplateNotBypassed(template.getId()); + List> downloadProgressDetails = new ArrayList(); HashMap downloadDetailInImageStores = null; for (TemplateDataStoreVO templateInStore : templatesInStore) { downloadDetailInImageStores = new HashMap<>(); - if (templateInStore != null) { - ImageStoreVO store = dataStoreDao.findById(templateInStore.getDataStoreId()); - String name = store != null ? 
store.getName() : ""; - downloadDetailInImageStores.put("datastore", name); - downloadDetailInImageStores.put("dowloadPercent", Integer.toString(templateInStore.getDownloadPercent())); - downloadDetailInImageStores.put("dowloadState", (templateInStore.getDownloadState() != null ? templateInStore.getDownloadState().toString() : "")); - dowloadProgressDetails.add(downloadDetailInImageStores); - } + downloadDetailInImageStores.put("datastore", dataStoreDao.findById(templateInStore.getDataStoreId()).getName()); + downloadDetailInImageStores.put("downloadPercent", Integer.toString(templateInStore.getDownloadPercent())); + downloadDetailInImageStores.put("downloadState", (templateInStore.getDownloadState() != null ? templateInStore.getDownloadState().toString() : "")); + downloadProgressDetails.add(downloadDetailInImageStores); } TemplateResponse templateResponse = new TemplateResponse(); - templateResponse.setDownloadProgress(dowloadProgressDetails); + templateResponse.setDownloadProgress(downloadProgressDetails); templateResponse.setId(template.getUuid()); templateResponse.setName(template.getName()); templateResponse.setDisplayText(template.getDisplayText()); From 8bce77c9bbb62f4ea64fc40a4ac8e036c61d2925 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Tue, 30 Jun 2020 21:58:38 +0530 Subject: [PATCH 25/40] Add fsm check --- api/src/main/java/com/cloud/storage/Volume.java | 1 + 1 file changed, 1 insertion(+) diff --git a/api/src/main/java/com/cloud/storage/Volume.java b/api/src/main/java/com/cloud/storage/Volume.java index dde9d60a8482..1c668515a7a2 100644 --- a/api/src/main/java/com/cloud/storage/Volume.java +++ b/api/src/main/java/com/cloud/storage/Volume.java @@ -84,6 +84,7 @@ public String getDescription() { s_fsm.addTransition(new StateMachine2.Transition(Resizing, Event.OperationFailed, Ready, null)); s_fsm.addTransition(new StateMachine2.Transition(Allocated, Event.UploadRequested, UploadOp, null)); s_fsm.addTransition(new StateMachine2.Transition(Uploaded, 
Event.CopyRequested, Copying, null)); + s_fsm.addTransition(new StateMachine2.Transition(Uploaded, Event.MigrationRequested, Copying, null)); s_fsm.addTransition(new StateMachine2.Transition(Copying, Event.OperationSucceeded, Ready, Arrays.asList(new StateMachine2.Transition.Impact[]{StateMachine2.Transition.Impact.USAGE}))); s_fsm.addTransition(new StateMachine2.Transition(Copying, Event.OperationFailed, Uploaded, null)); s_fsm.addTransition(new StateMachine2.Transition(UploadOp, Event.DestroyRequested, Destroy, null)); From 357f92469879b5f99a8f0cf24b6576f9ec922c02 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 1 Jul 2020 21:06:40 +0530 Subject: [PATCH 26/40] regression checks --- .../engine/orchestration/StorageOrchestrator.java | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index 13970cd18f4e..a28a535ab146 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -159,7 +159,7 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto storageService.updateImageStoreStatus(srcDataStoreId, true); } - storageCapacities = getStorageCapacities(storageCapacities); + storageCapacities = getStorageCapacities(storageCapacities, srcDataStoreId); double meanstddev = getStandardDeviation(storageCapacities); double threshold = ImageStoreImbalanceThreshold.value(); MigrationResponse response = null; @@ -179,11 +179,11 @@ public MigrationResponse migrateData(Long srcDataStoreId, List destDatasto chosenFileForMigration = files.remove(0); } - storageCapacities = getStorageCapacities(storageCapacities); + storageCapacities = 
getStorageCapacities(storageCapacities, srcDataStoreId); List orderedDS = migrationHelper.sortDataStores(storageCapacities); Long destDatastoreId = orderedDS.get(0); - if (chosenFileForMigration == null || destDatastoreId == null || destDatastoreId == srcDatastore.getId()) { + if (chosenFileForMigration == null || destDatastoreId == null || destDatastoreId == srcDatastore.getId() ) { Pair result = migrateCompleted(destDatastoreId, srcDatastore, files, migrationPolicy); message = result.first(); success = result.second(); @@ -303,7 +303,7 @@ private void handleSnapshotMigration(Long srcDataStoreId, Date start, Date end, } } - private Map> getStorageCapacities(Map> storageCapacities) { + private Map> getStorageCapacities(Map> storageCapacities, Long srcDataStoreId) { Map> capacities = new Hashtable<>(); for (Long storeId : storageCapacities.keySet()) { StorageStats stats = statsCollector.getStorageStats(storeId); @@ -313,10 +313,10 @@ private Map> getStorageCapacities(Map= storageCapacities.get(storeId).first()) { - capacities.put(storeId, storageCapacities.get(storeId)); - } else { + if (storeId.equals(srcDataStoreId) || freeCapacity < storageCapacities.get(storeId).first()) { capacities.put(storeId, new Pair<>(freeCapacity, totalCapacity)); + } else { + capacities.put(storeId, storageCapacities.get(storeId)); } } } else { From 4df63730f986cee535613a4ae6ceaf6add182f18 Mon Sep 17 00:00:00 2001 From: Vladimir Petrov Date: Fri, 10 Jul 2020 16:59:00 +0300 Subject: [PATCH 27/40] Added new tests related to new functionalities introduced with FR76 --- .../smoke/test_secondary_storage.py | 182 ++++++++++++++++++ test/integration/smoke/test_templates.py | 34 ++++ 2 files changed, 216 insertions(+) diff --git a/test/integration/smoke/test_secondary_storage.py b/test/integration/smoke/test_secondary_storage.py index b80b3e6813db..5a8f139de70b 100644 --- a/test/integration/smoke/test_secondary_storage.py +++ b/test/integration/smoke/test_secondary_storage.py @@ -24,6 +24,8 @@ 
from marvin.lib.base import * from marvin.lib.common import * from nose.plugins.attrib import attr +from marvin.cloudstackAPI import (listImageStores) +from marvin.cloudstackAPI import (updateImageStore) #Import System modules import time @@ -224,3 +226,183 @@ def test_02_sys_template_ready(self): True, "Builtin template is not ready %s in zone %s"%(template.status, zid) ) + + @attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false") + def test_03_check_read_only_flag(self): + """Test the secondary storage read-only flag + """ + + # Validate the following + # It is possible to enable/disable the read-only flag on a secondary storage and filter by it + # 1. Make the first secondary storage as read-only and verify its state has been changed + # 2. Search for the read-only storages and make sure ours is in the list + # 3. Make it again read/write and verify it has been set properly + + first_storage = self.list_secondary_storages(self.apiclient)[0] + first_storage_id = first_storage['id'] + # Step 1 + self.update_secondary_storage(self.apiclient, first_storage_id, True) + updated_storage = self.list_secondary_storages(self.apiclient, first_storage_id)[0] + self.assertEqual( + updated_storage['readonly'], + True, + "Check if the secondary storage status has been set to read-only" + ) + + # Step 2 + readonly_storages = self.list_secondary_storages(self.apiclient, readonly=True) + self.assertEqual( + isinstance(readonly_storages, list), + True, + "Check list response returns a valid list" + ) + result = any(d['id'] == first_storage_id for d in readonly_storages) + self.assertEqual( + result, + True, + "Check if we are able to list storages by their read-only status" + ) + + # Step 3 + self.update_secondary_storage(self.apiclient, first_storage_id, False) + updated_storage = self.list_secondary_storages(self.apiclient, first_storage_id)[0] + self.assertEqual( + updated_storage['readonly'], + False, + "Check if the secondary storage 
status has been set back to read-write" + ) + + @attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false") + def test_04_migrate_to_read_only_storage(self): + """Test migrations to a read-only secondary storage + """ + + # Validate the following + # It is not possible to migrate a storage to a read-only one + # NOTE: This test requires more than one secondary storage in the system + # 1. Make the first storage read-only + # 2. Try complete migration from the second to the first storage - it should fail + # 3. Try balanced migration from the second to the first storage - it should fail + # 4. Make the first storage read-write again + + storages = self.list_secondary_storages(self.apiclient) + if (len(storages)) < 2: + self.skipTest( + "This test requires more than one secondary storage") + + first_storage = self.list_secondary_storages(self.apiclient)[0] + first_storage_id = first_storage['id'] + second_storage = self.list_secondary_storages(self.apiclient)[1] + second_storage_id = second_storage['id'] + + # Set the first storage to read-only + self.update_secondary_storage(self.apiclient, first_storage_id, True) + + # Try complete migration from second to the first storage + + + success = False + try: + self.migrate_secondary_storage(self.apiclient, second_storage_id, first_storage_id, "complete") + except Exception as ex: + if re.search("No destination valid store\(s\) available to migrate.", str(ex)): + success = True + else: + self.debug("Secondary storage complete migration to a read-only one\ + did not fail appropriately. 
Error was actually : " + str(ex)); + + self.assertEqual(success, True, "Check if a complete migration to a read-only storage one fails appropriately") + + # Try balanced migration from second to the first storage + success = False + try: + self.migrate_secondary_storage(self.apiclient, second_storage_id, first_storage_id, "balance") + except Exception as ex: + if re.search("No destination valid store\(s\) available to migrate.", str(ex)): + success = True + else: + self.debug("Secondary storage balanced migration to a read-only one\ + did not fail appropriately. Error was actually : " + str(ex)) + + self.assertEqual(success, True, "Check if a balanced migration to a read-only storage one fails appropriately") + + # Set the first storage back to read-write + self.update_secondary_storage(self.apiclient, first_storage_id, False) + + @attr(tags = ["advanced", "advancedns", "smoke", "basic", "eip", "sg"], required_hardware="false") + def test_05_migrate_to_less_free_space(self): + """Test migrations when the destination storage has less space + """ + + # Validate the following + # Migration to a secondary storage with less space should be refused + # NOTE: This test requires more than one secondary storage in the system + # 1. Try complete migration from a storage with more (or equal) free space - migration should be refused + # 2. 
Try balanced migration from a storage with more (or equal) free space - migration should be refused + + storages = self.list_secondary_storages(self.apiclient) + if (len(storages)) < 2: + self.skipTest( + "This test requires more than one secondary storage") + + first_storage = self.list_secondary_storages(self.apiclient)[0] + first_storage_disksizeused = first_storage['disksizeused'] + first_storage_disksizetotal = first_storage['disksizetotal'] + second_storage = self.list_secondary_storages(self.apiclient)[1] + second_storage_disksizeused = second_storage['disksizeused'] + second_storage_disksizetotal = second_storage['disksizetotal'] + + first_storage_freespace = first_storage_disksizetotal - first_storage_disksizeused + second_storage_freespace = second_storage_disksizetotal - second_storage_disksizeused + + # Setting the storage with more free space as source storage + if first_storage_freespace > second_storage_freespace: + src_storage = first_storage['id'] + dst_storage = second_storage['id'] + else: + src_storage = second_storage['id'] + dst_storage = first_storage['id'] + + response = self.migrate_secondary_storage(self.apiclient, src_storage, dst_storage, "complete") + + success = False + if re.search("has equal or more free space than destination", str(response)): + success = True + else: + self.debug("Secondary storage complete migration to a storage \ + with less space was not refused. Here is the command output : " + str(response)) + + self.assertEqual(success, True, "Secondary storage complete migration to a storage\ + with less space was properly refused.") + + response = self.migrate_secondary_storage(self.apiclient, src_storage, dst_storage, "balance") + + success = False + if re.search("Migration not required as system seems balanced", str(response)): + success = True + else: + self.debug("Secondary storage balanced migration to a storage \ + with less space was not refused. 
Here is the command output : " + str(response)) + + self.assertEqual(success, True, "Secondary storage balanced migration to a storage\ + with less space was properly refused.") + + def list_secondary_storages(self, apiclient, id=None, readonly=None): + cmd = listImageStores.listImageStoresCmd() + cmd.id = id + cmd.readonly = readonly + return apiclient.listImageStores(cmd) + + def update_secondary_storage(self, apiclient, id, readonly): + cmd = updateImageStore.updateImageStoreCmd() + cmd.id = id + cmd.readonly = readonly + apiclient.updateImageStore(cmd) + + def migrate_secondary_storage(self, apiclient, first_id, second_id, type): + cmd = migrateSecondaryStorageData.migrateSecondaryStorageDataCmd() + cmd.srcpool = first_id + cmd.destpools = second_id + cmd.migrationtype = type + response = apiclient.migrateSecondaryStorageData(cmd) + return response diff --git a/test/integration/smoke/test_templates.py b/test/integration/smoke/test_templates.py index 9e9dd9fd3d60..345b363e5705 100644 --- a/test/integration/smoke/test_templates.py +++ b/test/integration/smoke/test_templates.py @@ -961,6 +961,40 @@ def test_08_list_system_templates(self): ) return + @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg", "test"], required_hardware="false") + def test_09_list_templates_download_details(self): + """Test if list templates returns download details""" + + # Validate the following + # 1. 
ListTemplates API has been extended to support viewing the download details - progress, download states and datastore + + list_template_response = Template.list( + self.apiclient, + templatefilter='all', + account=self.user.name, + domainid=self.user.domainid + ) + self.assertEqual( + isinstance(list_template_response, list), + True, + "Check list response returns a valid list" + ) + + self.assertNotEqual( + len(list_template_response), + 0, + "Check template available in List Templates" + ) + + for template in list_template_response: + self.assertNotEqual( + len(template.downloaddetails), + 0, + "Not all templates have download details" + ) + + return + class TestCopyAndDeleteTemplatesAcrossZones(cloudstackTestCase): @classmethod From 276eaa966b6cae69209dcddb5fdd320c31cd9688 Mon Sep 17 00:00:00 2001 From: Vladimir Petrov Date: Fri, 10 Jul 2020 16:59:59 +0300 Subject: [PATCH 28/40] Added new tests related to new functionalities introduced with FR76 --- test/integration/smoke/test_templates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/smoke/test_templates.py b/test/integration/smoke/test_templates.py index 345b363e5705..ae34f7628f4b 100644 --- a/test/integration/smoke/test_templates.py +++ b/test/integration/smoke/test_templates.py @@ -961,7 +961,7 @@ def test_08_list_system_templates(self): ) return - @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg", "test"], required_hardware="false") + @attr(tags = ["advanced", "advancedns", "smoke", "basic", "sg"], required_hardware="false") def test_09_list_templates_download_details(self): """Test if list templates returns download details""" From a68acb7ec8eeae3133c46a5812379efd493bf244 Mon Sep 17 00:00:00 2001 From: Vladimir Petrov Date: Mon, 13 Jul 2020 23:54:51 +0300 Subject: [PATCH 29/40] Removed one case for balanced migration giving different results in Trillian and real environment. 
--- .../integration/smoke/test_secondary_storage.py | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/test/integration/smoke/test_secondary_storage.py b/test/integration/smoke/test_secondary_storage.py index 5a8f139de70b..baa0f98935ed 100644 --- a/test/integration/smoke/test_secondary_storage.py +++ b/test/integration/smoke/test_secondary_storage.py @@ -338,7 +338,6 @@ def test_05_migrate_to_less_free_space(self): # Migration to a secondary storage with less space should be refused # NOTE: This test requires more than one secondary storage in the system # 1. Try complete migration from a storage with more (or equal) free space - migration should be refused - # 2. Try balanced migration from a storage with more (or equal) free space - migration should be refused storages = self.list_secondary_storages(self.apiclient) if (len(storages)) < 2: @@ -355,6 +354,10 @@ def test_05_migrate_to_less_free_space(self): first_storage_freespace = first_storage_disksizetotal - first_storage_disksizeused second_storage_freespace = second_storage_disksizetotal - second_storage_disksizeused + if first_storage_freespace == second_storage_freespace: + self.skipTest( + "This test requires two secondary storages with different free space") + # Setting the storage with more free space as source storage if first_storage_freespace > second_storage_freespace: src_storage = first_storage['id'] @@ -375,18 +378,6 @@ def test_05_migrate_to_less_free_space(self): self.assertEqual(success, True, "Secondary storage complete migration to a storage\ with less space was properly refused.") - response = self.migrate_secondary_storage(self.apiclient, src_storage, dst_storage, "balance") - - success = False - if re.search("Migration not required as system seems balanced", str(response)): - success = True - else: - self.debug("Secondary storage balanced migration to a storage \ - with less space was not refused. 
Here is the command output : " + str(response)) - - self.assertEqual(success, True, "Secondary storage balanced migration to a storage\ - with less space was properly refused.") - def list_secondary_storages(self, apiclient, id=None, readonly=None): cmd = listImageStores.listImageStoresCmd() cmd.id = id From 526a6d21e5b37279ddc26038919d990b35c34535 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Fri, 24 Jul 2020 13:03:26 +0530 Subject: [PATCH 30/40] Change global setting value --- .../main/java/com/cloud/storage/StorageManager.java | 3 +++ .../engine/orchestration/StorageOrchestrator.java | 2 +- .../java/com/cloud/storage/ImageStoreServiceImpl.java | 2 +- .../java/com/cloud/storage/StorageManagerImpl.java | 3 ++- .../PremiumSecondaryStorageManagerImpl.java | 11 +++++++---- 5 files changed, 14 insertions(+), 7 deletions(-) diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index e0fb1cfbc146..0f52206dd785 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -115,6 +115,9 @@ public interface StorageManager extends StorageService { ConfigKey SecStorageMaxMigrateSessions = new ConfigKey("Advanced", Integer.class, "secstorage.max.migrate.sessions", "2", "The max number of concurrent copy command execution sessions that an SSVM can handle", true, ConfigKey.Scope.Global); + ConfigKey MaxDataMigrationWaitTime = new ConfigKey("Advanced", Integer.class, "max.data.migration.wait.time", "15", + "Maximum wait time for a data migration task before spawning a new SSVM", false, ConfigKey.Scope.Global); + /** * Returns a comma separated list of tags for the specified storage pool * @param poolId diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java 
b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index a28a535ab146..f0bea7b2cbbf 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -95,7 +95,7 @@ public class StorageOrchestrator extends ManagerBase implements StorageOrchestra ConfigKey ImageStoreImbalanceThreshold = new ConfigKey<>("Advanced", Double.class, "image.store.imbalance.threshold", - "0.5", + "0.3", "The storage imbalance threshold that is compared with the standard deviation percentage for a storage utilization metric. " + "The value is a percentage in decimal format.", true, ConfigKey.Scope.Global); diff --git a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java index 6b82117e2a8b..a12b5564695c 100644 --- a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java @@ -53,7 +53,7 @@ public class ImageStoreServiceImpl extends ManagerBase implements ImageStoreServ ConfigKey ImageStoreImbalanceThreshold = new ConfigKey<>("Advanced", Double.class, "image.store.imbalance.threshold", - "0.5", + "0.3", "The storage imbalance threshold that is compared with the standard deviation percentage for a storage utilization metric. 
" + "The value is a percentage in decimal format.", true, ConfigKey.Scope.Global); diff --git a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java index 7505fc3de4d2..c392e4b082eb 100644 --- a/server/src/main/java/com/cloud/storage/StorageManagerImpl.java +++ b/server/src/main/java/com/cloud/storage/StorageManagerImpl.java @@ -2514,7 +2514,8 @@ public ConfigKey[] getConfigKeys() { KvmAutoConvergence, MaxNumberOfManagedClusteredFileSystems, PRIMARY_STORAGE_DOWNLOAD_WAIT, - SecStorageMaxMigrateSessions + SecStorageMaxMigrateSessions, + MaxDataMigrationWaitTime }; } diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java index ec8d4792eb9b..ddc7b4eeda2a 100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java @@ -59,8 +59,9 @@ public class PremiumSecondaryStorageManagerImpl extends SecondaryStorageManagerI private int migrateCapPerSSVM = DEFAULT_MIGRATE_SS_VM_CAPACITY; private int _standbyCapacity = SecondaryStorageVmManager.DEFAULT_STANDBY_CAPACITY; private int _maxExecutionTimeMs = 1800000; + private int maxDataMigrationWaitTime = 900000; long currentTime = DateUtil.currentGMTTime().getTime(); - long nextSpawnTime = currentTime + _maxExecutionTimeMs/2; + long nextSpawnTime = currentTime + maxDataMigrationWaitTime; private List migrationSSVMS = new ArrayList<>(); @Inject @@ -84,9 +85,11 @@ public boolean configure(String name, Map params) throws Configu int nMaxExecutionMinutes = 
NumbersUtil.parseInt(_configDao.getValue(Config.SecStorageCmdExecutionTimeMax.key()), 30); _maxExecutionTimeMs = nMaxExecutionMinutes * 60 * 1000; - nextSpawnTime = currentTime + _maxExecutionTimeMs/2; migrateCapPerSSVM = StorageManager.SecStorageMaxMigrateSessions.value(); + int nMaxDataMigrationWaitTime = StorageManager.MaxDataMigrationWaitTime.value(); + maxDataMigrationWaitTime = nMaxDataMigrationWaitTime * 60 * 1000; + nextSpawnTime = currentTime + maxDataMigrationWaitTime; hostSearch = _hostDao.createSearchBuilder(); hostSearch.and("dc", hostSearch.entity().getDataCenterId(), Op.EQ); @@ -174,9 +177,9 @@ private Pair scaleSSVMOnLoad(List return new Pair(AfterScanAction.expand, SecondaryStorageVm.Role.commandExecutor); } else if (!copyCmdsInPipeline.isEmpty() && copyCmdsInPipeline.size() >= halfLimit && - ((Math.abs(currentTime - copyCmdsInPipeline.get(halfLimit - 1).getCreated().getTime()) > _maxExecutionTimeMs/2 )) && + ((Math.abs(currentTime - copyCmdsInPipeline.get(halfLimit - 1).getCreated().getTime()) > maxDataMigrationWaitTime )) && (currentTime > nextSpawnTime) && alreadyRunning.size() <= maxSsvms) { - nextSpawnTime = currentTime + _maxExecutionTimeMs/2; + nextSpawnTime = currentTime + maxDataMigrationWaitTime; s_logger.debug("scaling SSVM to handle migration tasks"); return new Pair(AfterScanAction.expand, SecondaryStorageVm.Role.templateProcessor); From 78a566766f5a21c7a46063a6b4ab6132a8a15770 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 12 Aug 2020 00:10:05 +0530 Subject: [PATCH 31/40] Update PremiumSecondaryStorageManagerImpl.java --- .../PremiumSecondaryStorageManagerImpl.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java index ddc7b4eeda2a..a1e85b71e00b 
100644 --- a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java @@ -194,8 +194,8 @@ private void scaleDownSSVMOnLoad(List alreadyRunning, List if ((copyCmdsInPipeline.size() < halfLimit && alreadyRunning.size() * _capacityPerSSVM - activeCmds.size() > _standbyCapacity) && alreadyRunning.size() > 1) { Collections.reverse(alreadyRunning); for(SecondaryStorageVmVO vm : alreadyRunning) { - long count = copyCmdsInPipeline.stream().map(cmd -> cmd.getInstanceId() == vm.getId()).count(); - count += activeCmds.stream().map(cmd -> cmd.getInstanceId() == vm.getId()).count(); + long count = copyCmdsInPipeline.stream().filter(cmd -> cmd.getInstanceId() == vm.getId()).count(); + count += activeCmds.stream().filter(cmd -> cmd.getInstanceId() == vm.getId()).count(); if (count == 0) { destroySecStorageVm(vm.getId()); break; @@ -245,4 +245,4 @@ private boolean reserveStandbyCapacity() { } return false; } -} \ No newline at end of file +} From 67b41ecc8f30ba159bcaeadf9a7dffa6bca848a3 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 12 Aug 2020 14:38:02 +0530 Subject: [PATCH 32/40] code refactor --- .../engine/orchestration/StorageOrchestrator.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java index f0bea7b2cbbf..85b182a3dc21 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/StorageOrchestrator.java @@ -183,13 +183,17 @@ public MigrationResponse migrateData(Long srcDataStoreId, List 
destDatasto List orderedDS = migrationHelper.sortDataStores(storageCapacities); Long destDatastoreId = orderedDS.get(0); - if (chosenFileForMigration == null || destDatastoreId == null || destDatastoreId == srcDatastore.getId() ) { + if (chosenFileForMigration == null || destDatastoreId == null || (destDatastoreId == srcDatastore.getId() && migrationPolicy == MigrationPolicy.BALANCE) ) { Pair result = migrateCompleted(destDatastoreId, srcDatastore, files, migrationPolicy); message = result.first(); success = result.second(); break; } + if (migrationPolicy == MigrationPolicy.COMPLETE && destDatastoreId == srcDatastore.getId()) { + destDatastoreId = orderedDS.get(1); + } + if (chosenFileForMigration.getSize() > storageCapacities.get(destDatastoreId).first()) { s_logger.debug("file: " + chosenFileForMigration.getId() + " too large to be migrated to " + destDatastoreId); continue; From 490c235bcbbd8f1a07979bb0219205f795177e5f Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Sun, 16 Aug 2020 09:12:40 +0530 Subject: [PATCH 33/40] minor fix --- api/src/main/java/com/cloud/storage/Volume.java | 3 ++- .../api/storage/ObjectInDataStoreStateMachine.java | 3 ++- .../storage/image/SecondaryStorageServiceImpl.java | 8 ++++---- .../storage/datastore/ObjectInDataStoreManagerImpl.java | 4 ++-- .../apache/cloudstack/storage/volume/VolumeObject.java | 4 +++- .../cloudstack/storage/volume/VolumeServiceImpl.java | 1 + 6 files changed, 14 insertions(+), 9 deletions(-) diff --git a/api/src/main/java/com/cloud/storage/Volume.java b/api/src/main/java/com/cloud/storage/Volume.java index 1c668515a7a2..5fd78efb307e 100644 --- a/api/src/main/java/com/cloud/storage/Volume.java +++ b/api/src/main/java/com/cloud/storage/Volume.java @@ -84,7 +84,8 @@ public String getDescription() { s_fsm.addTransition(new StateMachine2.Transition(Resizing, Event.OperationFailed, Ready, null)); s_fsm.addTransition(new StateMachine2.Transition(Allocated, Event.UploadRequested, UploadOp, null)); 
s_fsm.addTransition(new StateMachine2.Transition(Uploaded, Event.CopyRequested, Copying, null)); - s_fsm.addTransition(new StateMachine2.Transition(Uploaded, Event.MigrationRequested, Copying, null)); + s_fsm.addTransition(new StateMachine2.Transition(Ready, Event.OperationSucceeded, Ready, null)); + s_fsm.addTransition(new StateMachine2.Transition(Ready, Event.OperationFailed, Ready, null)); s_fsm.addTransition(new StateMachine2.Transition(Copying, Event.OperationSucceeded, Ready, Arrays.asList(new StateMachine2.Transition.Impact[]{StateMachine2.Transition.Impact.USAGE}))); s_fsm.addTransition(new StateMachine2.Transition(Copying, Event.OperationFailed, Uploaded, null)); s_fsm.addTransition(new StateMachine2.Transition(UploadOp, Event.DestroyRequested, Destroy, null)); diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java index 3e6134f17026..611d1247c49d 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/subsystem/api/storage/ObjectInDataStoreStateMachine.java @@ -59,6 +59,7 @@ enum Event { MigrationCopySucceeded, MigrationCopyFailed, ResizeRequested, - ExpungeRequested + ExpungeRequested, + MigrateDataRequested } } diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java index 1505f9163a2d..e05dcaf9238e 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java @@ -82,8 +82,8 @@ public AsyncCallFuture migrateData(DataObject 
srcDataObject, D if (srcDataObject instanceof SnapshotInfo && snapshotChain != null && snapshotChain.containsKey(srcDataObject)) { for (SnapshotInfo snapshotInfo : snapshotChain.get(srcDataObject).first()) { destDataObject = destDatastore.create(snapshotInfo); - snapshotInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); - destDataObject.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); + snapshotInfo.processEvent(ObjectInDataStoreStateMachine.Event.MigrateDataRequested); + destDataObject.processEvent(ObjectInDataStoreStateMachine.Event.MigrateDataRequested); migrateJob(future, snapshotInfo, destDataObject, destDatastore); } } else { @@ -101,8 +101,8 @@ public AsyncCallFuture migrateData(DataObject srcDataObject, D } } destDataObject = destDatastore.create(srcDataObject); - srcDataObject.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); - destDataObject.processEvent(ObjectInDataStoreStateMachine.Event.MigrationRequested); + srcDataObject.processEvent(ObjectInDataStoreStateMachine.Event.MigrateDataRequested); + destDataObject.processEvent(ObjectInDataStoreStateMachine.Event.MigrateDataRequested); migrateJob(future, srcDataObject, destDataObject, destDatastore); } } catch (Exception e) { diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java index 2a12feb5c534..ff8112cceff9 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/datastore/ObjectInDataStoreManagerImpl.java @@ -105,9 +105,9 @@ public ObjectInDataStoreManagerImpl() { // alreay Ready for DownloadListener stateMachines.addTransition(State.Ready, Event.OperationSuccessed, State.Ready); // State transitions for data object migration - 
stateMachines.addTransition(State.Ready, Event.MigrationRequested, State.Migrating); + stateMachines.addTransition(State.Ready, Event.MigrateDataRequested, State.Migrating); stateMachines.addTransition(State.Ready, Event.CopyRequested, State.Copying); - stateMachines.addTransition(State.Allocated, Event.MigrationRequested, State.Migrating); + stateMachines.addTransition(State.Allocated, Event.MigrateDataRequested, State.Migrating); stateMachines.addTransition(State.Migrating, Event.MigrationFailed, State.Failed); stateMachines.addTransition(State.Migrating, Event.MigrationSucceeded, State.Destroyed); stateMachines.addTransition(State.Migrating, Event.OperationSuccessed, State.Ready); diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java index 2bb643bd652d..76e59d828566 100644 --- a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeObject.java @@ -391,7 +391,9 @@ public void processEvent(ObjectInDataStoreStateMachine.Event event) { if (event == ObjectInDataStoreStateMachine.Event.CreateOnlyRequested) { volEvent = Volume.Event.UploadRequested; } else if (event == ObjectInDataStoreStateMachine.Event.MigrationRequested) { - volEvent = Volume.Event.MigrationRequested; + volEvent = Event.CopyRequested; + } else if (event == ObjectInDataStoreStateMachine.Event.MigrateDataRequested) { + return; } } else { if (event == ObjectInDataStoreStateMachine.Event.CreateRequested || event == ObjectInDataStoreStateMachine.Event.CreateOnlyRequested) { diff --git a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java index f33d5e4106bd..77413ad6c2b6 100644 --- 
a/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java +++ b/engine/storage/volume/src/main/java/org/apache/cloudstack/storage/volume/VolumeServiceImpl.java @@ -1359,6 +1359,7 @@ protected Void copyVolumeFromImageToPrimaryCallback(AsyncCallbackDispatcher Date: Mon, 17 Aug 2020 11:01:19 +0530 Subject: [PATCH 34/40] fixes --- .../image/BaseImageStoreDriverImpl.java | 5 ++- .../cloud/storage/VolumeApiServiceImpl.java | 34 ++++++++++--------- 2 files changed, 22 insertions(+), 17 deletions(-) diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java index 2606e3e37f70..522503dd40bd 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java @@ -426,7 +426,10 @@ public boolean canCopy(DataObject srcData, DataObject destData) { DataStore srcStore = srcData.getDataStore(); DataStore destStore = destData.getDataStore(); if ((srcData.getDataStore().getTO() instanceof NfsTO && destData.getDataStore().getTO() instanceof NfsTO) && - (srcStore.getRole() == DataStoreRole.Image && destStore.getRole() == DataStoreRole.Image)) { + (srcStore.getRole() == DataStoreRole.Image && destStore.getRole() == DataStoreRole.Image) && + ((srcData.getType() == DataObjectType.TEMPLATE && destData.getType() == DataObjectType.TEMPLATE) || + (srcData.getType() == DataObjectType.SNAPSHOT && destData.getType() == DataObjectType.SNAPSHOT) || + (srcData.getType() == DataObjectType.VOLUME && destData.getType() == DataObjectType.VOLUME))) { return true; } return false; diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java index dc90c37d2a73..b5c33eb5fa11 100644 --- 
a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java @@ -2788,24 +2788,26 @@ private String orchestrateExtractVolume(long volumeId, long zoneId) { // Copy volume from primary to secondary storage VolumeInfo srcVol = volFactory.getVolume(volumeId); - AsyncCallFuture cvAnswer = volService.copyVolume(srcVol, secStore); - // Check if you got a valid answer. + VolumeInfo destVol = volFactory.getVolume(volumeId, DataStoreRole.Image); VolumeApiResult cvResult = null; - try { - cvResult = cvAnswer.get(); - } catch (InterruptedException e1) { - s_logger.debug("failed copy volume", e1); - throw new CloudRuntimeException("Failed to copy volume", e1); - } catch (ExecutionException e1) { - s_logger.debug("failed copy volume", e1); - throw new CloudRuntimeException("Failed to copy volume", e1); - } - if (cvResult == null || cvResult.isFailed()) { - String errorString = "Failed to copy the volume from the source primary storage pool to secondary storage."; - throw new CloudRuntimeException(errorString); + if (destVol == null) { + AsyncCallFuture cvAnswer = volService.copyVolume(srcVol, secStore); + // Check if you got a valid answer. + try { + cvResult = cvAnswer.get(); + } catch (InterruptedException e1) { + s_logger.debug("failed copy volume", e1); + throw new CloudRuntimeException("Failed to copy volume", e1); + } catch (ExecutionException e1) { + s_logger.debug("failed copy volume", e1); + throw new CloudRuntimeException("Failed to copy volume", e1); + } + if (cvResult == null || cvResult.isFailed()) { + String errorString = "Failed to copy the volume from the source primary storage pool to secondary storage."; + throw new CloudRuntimeException(errorString); + } } - - VolumeInfo vol = cvResult.getVolume(); + VolumeInfo vol = cvResult != null ? 
cvResult.getVolume() : destVol; String extractUrl = secStore.createEntityExtractUrl(vol.getPath(), vol.getFormat(), vol); VolumeDataStoreVO volumeStoreRef = _volumeStoreDao.findByVolume(volumeId); From 26e058beaa94dd56a05751370ab72abc7890a7e8 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 19 Aug 2020 13:20:25 +0530 Subject: [PATCH 35/40] Check for transitions --- .../image/SecondaryStorageServiceImpl.java | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java index e05dcaf9238e..adcba521193c 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java @@ -31,6 +31,7 @@ import org.apache.cloudstack.engine.subsystem.api.storage.SecondaryStorageService; import org.apache.cloudstack.engine.subsystem.api.storage.SnapshotInfo; import org.apache.cloudstack.engine.subsystem.api.storage.TemplateInfo; +import org.apache.cloudstack.engine.subsystem.api.storage.VolumeInfo; import org.apache.cloudstack.framework.async.AsyncCallFuture; import org.apache.cloudstack.framework.async.AsyncCallbackDispatcher; import org.apache.cloudstack.framework.async.AsyncCompletionCallback; @@ -110,7 +111,11 @@ public AsyncCallFuture migrateData(DataObject srcDataObject, D if (destDataObject != null) { destDataObject.getDataStore().delete(destDataObject); } - srcDataObject.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); + if (!(srcDataObject instanceof VolumeInfo)) { + srcDataObject.processEvent(ObjectInDataStoreStateMachine.Event.OperationFailed); + } else { + ((VolumeInfo) srcDataObject).processEventOnly(ObjectInDataStoreStateMachine.Event.OperationFailed); + } 
res.setResult(e.toString()); future.complete(res); } @@ -138,9 +143,15 @@ protected Void migrateDataCallBack(AsyncCallbackDispatcher Date: Mon, 7 Sep 2020 11:16:52 +0530 Subject: [PATCH 36/40] Added guarding against stores other than NFS --- .../main/java/com/cloud/host/dao/HostDao.java | 2 ++ .../java/com/cloud/host/dao/HostDaoImpl.java | 15 +++++++++++++++ .../image/SecondaryStorageServiceImpl.java | 11 +++++++++++ .../cloud/storage/ImageStoreServiceImpl.java | 19 +++++++++++++++++++ .../PremiumSecondaryStorageManagerImpl.java | 10 +++++----- .../resource/NfsSecondaryStorageResource.java | 2 +- 6 files changed, 53 insertions(+), 6 deletions(-) diff --git a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java index d2f579e97b95..3d76c8b38c1d 100644 --- a/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java +++ b/engine/schema/src/main/java/com/cloud/host/dao/HostDao.java @@ -39,6 +39,8 @@ public interface HostDao extends GenericDao, StateDao implements HostDao protected SearchBuilder UnmanagedApplianceSearch; protected SearchBuilder MaintenanceCountSearch; protected SearchBuilder HostTypeCountSearch; + protected SearchBuilder HostTypeZoneCountSearch; protected SearchBuilder ClusterStatusSearch; protected SearchBuilder TypeNameZoneSearch; protected SearchBuilder AvailHypevisorInZone; @@ -167,6 +168,12 @@ public void init() { HostTypeCountSearch.and("removed", HostTypeCountSearch.entity().getRemoved(), SearchCriteria.Op.NULL); HostTypeCountSearch.done(); + HostTypeZoneCountSearch = createSearchBuilder(); + HostTypeZoneCountSearch.and("type", HostTypeZoneCountSearch.entity().getType(), SearchCriteria.Op.EQ); + HostTypeZoneCountSearch.and("dc", HostTypeZoneCountSearch.entity().getDataCenterId(), SearchCriteria.Op.EQ); + HostTypeZoneCountSearch.and("removed", HostTypeZoneCountSearch.entity().getRemoved(), SearchCriteria.Op.NULL); + HostTypeZoneCountSearch.done(); + TypePodDcStatusSearch = 
createSearchBuilder(); HostVO entity = TypePodDcStatusSearch.entity(); TypePodDcStatusSearch.and("type", entity.getType(), SearchCriteria.Op.EQ); @@ -447,6 +454,14 @@ public Integer countAllByType(final Host.Type type) { return getCount(sc); } + @Override + public Integer countAllByTypeInZone(long zoneId, Type type) { + SearchCriteria sc = HostTypeCountSearch.create(); + sc.setParameters("type", type); + sc.setParameters("dc", zoneId); + return getCount(sc); + } + @Override public List listByDataCenterId(long id) { SearchCriteria sc = DcSearch.create(); diff --git a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java index 62a96017b765..3a29333296f6 100644 --- a/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java +++ b/engine/storage/image/src/main/java/org/apache/cloudstack/storage/image/SecondaryStorageServiceImpl.java @@ -42,6 +42,8 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; +import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao; +import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO; import org.apache.log4j.Logger; import com.cloud.secstorage.CommandExecLogDao; @@ -60,6 +62,8 @@ public class SecondaryStorageServiceImpl implements SecondaryStorageService { TemplateDataStoreDao templateStoreDao; @Inject SnapshotDataStoreDao snapshotStoreDao; + @Inject + VolumeDataStoreDao volumeDataStoreDao; private class MigrateDataContext extends AsyncRpcContext { final DataObject srcData; @@ -168,6 +172,13 @@ protected Void migrateDataCallBack(AsyncCallbackDispatcher scanPool(Long pool) { alreadyRunning = _secStorageVmDao.getSecStorageVmListInStates(null, dataCenterId, State.Running, State.Migrating, State.Starting); List activeCmds = 
findActiveCommands(dataCenterId, cutTime); List copyCmdsInPipeline = findAllActiveCopyCommands(dataCenterId, cutTime); - return scaleSSVMOnLoad(alreadyRunning, activeCmds, copyCmdsInPipeline); + return scaleSSVMOnLoad(alreadyRunning, activeCmds, copyCmdsInPipeline, dataCenterId); } return new Pair(AfterScanAction.nop, null); } private Pair scaleSSVMOnLoad(List alreadyRunning, List activeCmds, - List copyCmdsInPipeline) { - Integer hostsCount = _hostDao.countAllByType(Host.Type.Routing); + List copyCmdsInPipeline, long dataCenterId) { + Integer hostsCount = _hostDao.countAllByTypeInZone(dataCenterId, Host.Type.Routing); Integer maxSsvms = (hostsCount < MaxNumberOfSsvmsForMigration.value()) ? hostsCount : MaxNumberOfSsvmsForMigration.value(); int halfLimit = Math.round((float) (alreadyRunning.size() * migrateCapPerSSVM) / 2); currentTime = DateUtil.currentGMTTime().getTime(); @@ -181,7 +181,7 @@ else if (!copyCmdsInPipeline.isEmpty() && copyCmdsInPipeline.size() >= halfLimi (currentTime > nextSpawnTime) && alreadyRunning.size() <= maxSsvms) { nextSpawnTime = currentTime + maxDataMigrationWaitTime; s_logger.debug("scaling SSVM to handle migration tasks"); - return new Pair(AfterScanAction.expand, SecondaryStorageVm.Role.templateProcessor); + return new Pair(AfterScanAction.expand, SecondaryStorageVm.Role.commandExecutor); } scaleDownSSVMOnLoad(alreadyRunning, activeCmds, copyCmdsInPipeline); @@ -195,7 +195,7 @@ private void scaleDownSSVMOnLoad(List alreadyRunning, List Collections.reverse(alreadyRunning); for(SecondaryStorageVmVO vm : alreadyRunning) { long count = activeCmds.stream().filter(cmd -> cmd.getInstanceId() == vm.getId()).count(); - if (count == 0 && copyCmdsInPipeline.size() == 0) { + if (count == 0 && copyCmdsInPipeline.size() == 0 && vm.getRole() != SecondaryStorageVm.Role.templateProcessor) { destroySecStorageVm(vm.getId()); break; } diff --git 
a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java index 3c78086f4887..0832ab6e496b 100644 --- a/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java +++ b/services/secondary-storage/server/src/main/java/org/apache/cloudstack/storage/resource/NfsSecondaryStorageResource.java @@ -1341,7 +1341,7 @@ protected Answer copyFromNfsToNfs(CopyCommand cmd) { } else { newVol.setPath(destData.getPath()); } - newVol.setSize(srcFile.length()); + newVol.setSize(getVirtualSize(srcFile, format)); retObj = newVol; } else if (destData.getObjectType() == DataObjectType.SNAPSHOT) { SnapshotObjectTO newSnapshot = new SnapshotObjectTO(); From 4675068d9217e3136ab41626255554bef1543831 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 9 Sep 2020 13:49:07 +0530 Subject: [PATCH 37/40] Update ImageStoreServiceImpl.java --- .../src/main/java/com/cloud/storage/ImageStoreServiceImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java index 17ca408817f1..43f9cd455be2 100644 --- a/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java +++ b/server/src/main/java/com/cloud/storage/ImageStoreServiceImpl.java @@ -128,7 +128,7 @@ public MigrationResponse migrateData(MigrateSecondaryStorageDataCmd cmd) { continue; } - if (srcStoreDcId != null & store.getDataCenterId() != null && !srcStoreDcId.equals(store.getDataCenterId())) { + if (srcStoreDcId != null && store.getDataCenterId() != null && !srcStoreDcId.equals(store.getDataCenterId())) { s_logger.warn("Source and destination stores are not in the same zone. 
Skipping destination store: " + store.getName()); continue; } From eed05f97d7e8181430566702d625885dc7b81b93 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Sat, 12 Sep 2020 08:44:08 +0530 Subject: [PATCH 38/40] Null check --- .../cloudstack/storage/image/BaseImageStoreDriverImpl.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java index 8e640074ee9d..965c33228887 100644 --- a/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java +++ b/engine/storage/src/main/java/org/apache/cloudstack/storage/image/BaseImageStoreDriverImpl.java @@ -375,7 +375,7 @@ public void copyAsync(DataObject srcdata, DataObject destData, AsyncCompletionCa // Select host endpoint such that the load is balanced out List eps = _epSelector.findAllEndpointsForScope(srcdata.getDataStore()); - if (eps.isEmpty()) { + if (eps == null || eps.isEmpty()) { String errMsg = "No remote endpoint to send command, check if host or ssvm is down?"; s_logger.error(errMsg); answer = new Answer(cmd, false, errMsg); From 716e77af138c8dd0bc5af4a334113f3dbb3a36cd Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 16 Sep 2020 12:06:30 +0530 Subject: [PATCH 39/40] Correct sql query to get all active commands on ssvms --- .../secondarystorage/PremiumSecondaryStorageManagerImpl.java | 2 +- test.sh | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 test.sh diff --git a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java index 7071248896aa..d21ec614f405 100644 --- 
a/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java +++ b/services/secondary-storage/controller/src/main/java/org/apache/cloudstack/secondarystorage/PremiumSecondaryStorageManagerImpl.java @@ -97,7 +97,7 @@ public boolean configure(String name, Map params) throws Configu activeCommandSearch = _cmdExecLogDao.createSearchBuilder(); activeCommandSearch.and("created", activeCommandSearch.entity().getCreated(), Op.GTEQ); - activeCommandSearch.join("hostSearch", hostSearch, activeCommandSearch.entity().getInstanceId(), hostSearch.entity().getId(), JoinType.INNER); + activeCommandSearch.join("hostSearch", hostSearch, activeCommandSearch.entity().getHostId(), hostSearch.entity().getId(), JoinType.INNER); activeCopyCommandSearch = _cmdExecLogDao.createSearchBuilder(); activeCopyCommandSearch.and("created", activeCopyCommandSearch.entity().getCreated(), Op.GTEQ); diff --git a/test.sh b/test.sh new file mode 100644 index 000000000000..d45dd51bccf7 --- /dev/null +++ b/test.sh @@ -0,0 +1 @@ +cks From 8a8552428f02925013f7b2e35efb72ee95704487 Mon Sep 17 00:00:00 2001 From: Pearl Dsilva Date: Wed, 16 Sep 2020 13:46:24 +0530 Subject: [PATCH 40/40] Removed junk file --- test.sh | 1 - 1 file changed, 1 deletion(-) delete mode 100644 test.sh diff --git a/test.sh b/test.sh deleted file mode 100644 index d45dd51bccf7..000000000000 --- a/test.sh +++ /dev/null @@ -1 +0,0 @@ -cks