hypervisorGuestOsMap = new HashMap<>() {
+ {
+ put(Hypervisor.HypervisorType.KVM, LINUX_12_ID);
+ put(Hypervisor.HypervisorType.XenServer, OTHER_LINUX_ID);
+ put(Hypervisor.HypervisorType.VMware, OTHER_LINUX_ID);
+ put(Hypervisor.HypervisorType.LXC, LINUX_12_ID);
}
+ };
+
+ private static boolean isRunningInTest() {
+ return "true".equalsIgnoreCase(System.getProperty("test.mode"));
}
private static String getHypervisorArchLog(Hypervisor.HypervisorType hypervisorType, CPU.CPUArch arch) {
StringBuilder sb = new StringBuilder("hypervisor: ").append(hypervisorType.name());
- if (Hypervisor.HypervisorType.KVM.equals(hypervisorType)) {
- sb.append(", arch: ").append(arch == null ? CPU.CPUArch.amd64.getType() : arch.getType());
- }
+ sb.append(", arch: ").append(arch == null ? CPU.CPUArch.amd64.getType() : arch.getType());
return sb.toString();
}
- protected static String getHypervisorArchKey(Hypervisor.HypervisorType hypervisorType, CPU.CPUArch arch) {
- if (Hypervisor.HypervisorType.KVM.equals(hypervisorType)) {
- return String.format("%s-%s", hypervisorType.name().toLowerCase(),
- arch == null ? CPU.CPUArch.amd64.getType() : arch.getType());
- }
- return hypervisorType.name().toLowerCase();
- }
-
- protected static MetadataTemplateDetails getMetadataTemplateDetails(Hypervisor.HypervisorType hypervisorType,
- CPU.CPUArch arch) {
- return NewTemplateMap.get(getHypervisorArchKey(hypervisorType, arch));
- }
-
- public VMTemplateVO getRegisteredTemplate(String templateName, CPU.CPUArch arch) {
- return vmTemplateDao.findLatestTemplateByName(templateName, arch);
- }
-
- private static boolean isRunningInTest() {
- return "true".equalsIgnoreCase(System.getProperty("test.mode"));
- }
-
/**
* Attempts to determine the templates directory path by locating the metadata file.
*
@@ -460,7 +391,170 @@ private static String fetchTemplatesPath() {
throw new CloudRuntimeException(errMsg);
}
- private List getEligibleZoneIds() {
+ protected static void cleanupStore(Long templateId, String filePath) {
+ String destTempFolder = filePath + PARTIAL_TEMPLATE_FOLDER + templateId;
+ try {
+ Files.deleteIfExists(Paths.get(destTempFolder));
+ } catch (IOException e) {
+ LOGGER.error("Failed to cleanup mounted store at: {}", filePath, e);
+ }
+ }
+
+ protected static Pair readTemplatePropertiesSizes(String path) {
+ File tmpFile = new File(path);
+ Long size = null;
+ long physicalSize = 0L;
+ try (FileReader fr = new FileReader(tmpFile); BufferedReader brf = new BufferedReader(fr)) {
+ String line = null;
+ while ((line = brf.readLine()) != null) {
+ if (line.startsWith("size=")) {
+ physicalSize = Long.parseLong(line.split("=")[1]);
+ } else if (line.startsWith("virtualsize=")) {
+ size = Long.parseLong(line.split("=")[1]);
+ }
+ if (size == null) {
+ size = physicalSize;
+ }
+ }
+ } catch (IOException ex) {
+ LOGGER.warn("Failed to read from template.properties", ex);
+ }
+ return new Pair<>(size, physicalSize);
+ }
+
+ protected static MetadataTemplateDetails getMetadataTemplateDetails(Hypervisor.HypervisorType hypervisorType,
+ CPU.CPUArch arch) {
+ return METADATA_TEMPLATE_LIST
+ .stream()
+ .filter(x -> Objects.equals(x.getHypervisorType(), hypervisorType) &&
+ Objects.equals(x.getArch(), arch))
+ .findFirst()
+ .orElse(null);
+ }
+
/**
 * Returns the path of the system VM Template metadata file
 * ({@code METADATA_FILE}).
 */
protected static String getMetadataFilePath() {
    return METADATA_FILE;
}
+
+ protected static Ini.Section getMetadataSectionForHypervisorAndArch(Ini ini,
+ Hypervisor.HypervisorType hypervisorType, CPU.CPUArch arch) {
+ String key = String.format("%s-%s", hypervisorType.name().toLowerCase(),
+ arch.getType().toLowerCase());
+ Ini.Section section = ini.get(key);
+ if (section == null && !Hypervisor.HypervisorType.KVM.equals(hypervisorType)) {
+ key = String.format("%s", hypervisorType.name().toLowerCase());
+ section = ini.get(key);
+ }
+ return section;
+ }
+
+ protected static String getMountCommand(String nfsVersion, String device, String dir) {
+ String cmd = MOUNT_COMMAND_BASE;
+ if (StringUtils.isNotBlank(nfsVersion)) {
+ cmd = String.format("%s -o vers=%s", cmd, nfsVersion);
+ }
+ return String.format("%s %s %s", cmd, device, dir);
+ }
+
+ /**
+ * This method parses the metadata file consisting of the system VM Templates information
+ * @return the version of the system VM Template that is to be used. This is done in order
+ * to fallback on the latest available version of the system VM Template when there doesn't
+ * exist a template corresponding to the current code version.
+ */
+ public static String parseMetadataFile() {
+ String metadataFilePath = getMetadataFilePath();
+ String errMsg = String.format("Failed to parse system VM Template metadata file: %s", metadataFilePath);
+ final Ini ini = new Ini();
+ try (FileReader reader = new FileReader(metadataFilePath)) {
+ ini.load(reader);
+ } catch (IOException e) {
+ LOGGER.error(errMsg, e);
+ throw new CloudRuntimeException(errMsg, e);
+ }
+ if (!ini.containsKey("default")) {
+ errMsg = String.format("%s as unable to default section", errMsg);
+ LOGGER.error(errMsg);
+ throw new CloudRuntimeException(errMsg);
+ }
+ Ini.Section defaultSection = ini.get("default");
+ String defaultDownloadRepository = defaultSection.get(TEMPLATES_DOWNLOAD_REPOSITORY_KEY);
+ String customDownloadRepository = ServerPropertiesUtil.getProperty(TEMPLATES_CUSTOM_DOWNLOAD_REPOSITORY_KEY);
+ boolean updateCustomDownloadRepository = StringUtils.isNotBlank(customDownloadRepository) &&
+ StringUtils.isNotBlank(defaultDownloadRepository);
+ for (Pair hypervisorTypeArchPair : AVAILABLE_SYSTEM_TEMPLATES_HYPERVISOR_ARCH_LIST) {
+ String key = String.format("%s-%s", hypervisorTypeArchPair.first().name().toLowerCase(),
+ hypervisorTypeArchPair.second().getType().toLowerCase());
+ Ini.Section section = getMetadataSectionForHypervisorAndArch(ini, hypervisorTypeArchPair.first(),
+ hypervisorTypeArchPair.second());
+ if (section == null) {
+ LOGGER.error("Failed to find details for {} in template metadata file: {}",
+ getHypervisorArchLog(hypervisorTypeArchPair.first(), hypervisorTypeArchPair.second()),
+ metadataFilePath);
+ continue;
+ }
+ String url = section.get(TEMPLATE_DOWNLOAD_URL_KEY);
+ if (StringUtils.isNotBlank(url) && updateCustomDownloadRepository) {
+ url = url.replaceFirst(defaultDownloadRepository.trim(),
+ customDownloadRepository.trim());
+ LOGGER.debug("Updated download URL for {} using custom repository to {}", key, url);
+ }
+ METADATA_TEMPLATE_LIST.add(new MetadataTemplateDetails(
+ hypervisorTypeArchPair.first(),
+ section.get("templatename"),
+ section.get("filename"),
+ url,
+ section.get("checksum"),
+ hypervisorTypeArchPair.second(),
+ section.get("guestos")));
+ }
+ return defaultSection.get("version").trim();
+ }
+
+ public static void mountStore(String storeUrl, String path, String nfsVersion) {
+ try {
+ if (storeUrl == null) {
+ return;
+ }
+ URI uri = new URI(UriUtils.encodeURIComponent(storeUrl));
+ String host = uri.getHost();
+ String mountPath = uri.getPath();
+ Script.runSimpleBashScript(getMountCommand(nfsVersion, host + ":" + mountPath, path));
+ } catch (Exception e) {
+ String msg = "NFS Store URL is not in the correct format";
+ LOGGER.error(msg, e);
+ throw new CloudRuntimeException(msg, e);
+ }
+ }
+
+ public static void unmountStore(String filePath) {
+ try {
+ LOGGER.info("Unmounting store");
+ String umountCmd = String.format(UMOUNT_COMMAND, filePath);
+ Script.runSimpleBashScript(umountCmd);
+ try {
+ Files.deleteIfExists(Paths.get(filePath));
+ } catch (IOException e) {
+ LOGGER.error("Failed to cleanup mounted store at: {}", filePath, e);
+ }
+ } catch (Exception e) {
+ String msg = String.format("Failed to unmount store mounted at %s", filePath);
+ LOGGER.error(msg, e);
+ throw new CloudRuntimeException(msg, e);
+ }
+ }
+
/**
 * Returns the temporary directory used for template downloads when the
 * default templates path is not writable (see {@code getTemplateFile}).
 */
protected File getTempDownloadDir() {
    return tempDownloadDir;
}
+
+ protected void readTemplateProperties(String path, SystemVMTemplateDetails details) {
+ Pair templateSizes = readTemplatePropertiesSizes(path);
+ details.setSize(templateSizes.first());
+ details.setPhysicalSize(templateSizes.second());
+ }
+
+ protected List getEligibleZoneIds() {
List zoneIds = new ArrayList<>();
List stores = imageStoreDao.findByProtocol("nfs");
for (ImageStoreVO store : stores) {
@@ -484,27 +578,18 @@ protected Pair getNfsStoreInZone(Long zoneId) {
return new Pair<>(url, storeId);
}
- public static void mountStore(String storeUrl, String path, String nfsVersion) {
- try {
- if (storeUrl == null) {
- return;
- }
- URI uri = new URI(UriUtils.encodeURIComponent(storeUrl));
- String host = uri.getHost();
- String mountPath = uri.getPath();
- Script.runSimpleBashScript(getMountCommand(nfsVersion, host + ":" + mountPath, path));
- } catch (Exception e) {
- String msg = "NFS Store URL is not in the correct format";
- LOGGER.error(msg, e);
- throw new CloudRuntimeException(msg, e);
+ protected String getSystemVmTemplateVersion() {
+ if (StringUtils.isEmpty(systemVmTemplateVersion)) {
+ return String.format("%s.%s", CS_MAJOR_VERSION, CS_TINY_VERSION);
}
+ return systemVmTemplateVersion;
}
private VMTemplateVO createTemplateObjectInDB(SystemVMTemplateDetails details) {
Long templateId = vmTemplateDao.getNextInSequence(Long.class, "id");
VMTemplateVO template = new VMTemplateVO();
template.setUuid(details.getUuid());
- template.setUniqueName(String.format("routing-%s" , String.valueOf(templateId)));
+ template.setUniqueName(String.format("routing-%s" , templateId));
template.setName(details.getName());
template.setPublicTemplate(false);
template.setFeatured(false);
@@ -527,7 +612,7 @@ private VMTemplateVO createTemplateObjectInDB(SystemVMTemplateDetails details) {
return template;
}
- private VMTemplateZoneVO createOrUpdateTemplateZoneEntry(long zoneId, long templateId) {
+ protected VMTemplateZoneVO createOrUpdateTemplateZoneEntry(long zoneId, long templateId) {
VMTemplateZoneVO templateZoneVO = vmTemplateZoneDao.findByZoneTemplate(zoneId, templateId);
if (templateZoneVO == null) {
templateZoneVO = new VMTemplateZoneVO(zoneId, templateId, new java.util.Date());
@@ -541,33 +626,37 @@ private VMTemplateZoneVO createOrUpdateTemplateZoneEntry(long zoneId, long templ
return templateZoneVO;
}
- private void createCrossZonesTemplateZoneRefEntries(Long templateId) {
+ protected void createCrossZonesTemplateZoneRefEntries(Long templateId) {
List dcs = dataCenterDao.listAll();
for (DataCenterVO dc : dcs) {
VMTemplateZoneVO templateZoneVO = createOrUpdateTemplateZoneEntry(dc.getId(), templateId);
if (templateZoneVO == null) {
- throw new CloudRuntimeException(String.format("Failed to create template_zone_ref record for the systemVM Template (id: %s) and zone: %s", templateId, dc));
+ throw new CloudRuntimeException(String.format("Failed to create template-zone record for the system " +
+ "VM Template (ID : %d) and zone: %s", templateId, dc));
}
}
}
- private void createTemplateStoreRefEntry(SystemVMTemplateDetails details) {
- TemplateDataStoreVO templateDataStoreVO = new TemplateDataStoreVO(details.storeId, details.getId(), details.getCreated(), 0,
- VMTemplateStorageResourceAssoc.Status.NOT_DOWNLOADED, null, null, null, details.getInstallPath(), details.getUrl());
+ protected void createTemplateStoreRefEntry(SystemVMTemplateDetails details) {
+ TemplateDataStoreVO templateDataStoreVO = new TemplateDataStoreVO(details.getStoreId(), details.getId(),
+ details.getCreated(), 0, VMTemplateStorageResourceAssoc.Status.NOT_DOWNLOADED,
+ null, null, null, details.getInstallPath(), details.getUrl());
templateDataStoreVO.setDataStoreRole(DataStoreRole.Image);
templateDataStoreVO = templateDataStoreDao.persist(templateDataStoreVO);
if (templateDataStoreVO == null) {
- throw new CloudRuntimeException(String.format("Failed to create template_store_ref record for the systemVM Template for hypervisor: %s", details.getHypervisorType().name()));
+ throw new CloudRuntimeException(String.format("Failed to create template-store record for the system VM " +
+ "template (ID : %d) and store (ID: %d)", details.getId(), details.getStoreId()));
}
}
- public void updateTemplateDetails(SystemVMTemplateDetails details) {
+ protected void updateTemplateDetails(SystemVMTemplateDetails details) {
VMTemplateVO template = vmTemplateDao.findById(details.getId());
template.setSize(details.getSize());
template.setState(VirtualMachineTemplate.State.Active);
vmTemplateDao.update(template.getId(), template);
- TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByStoreTemplate(details.getStoreId(), template.getId());
+ TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByStoreTemplate(details.getStoreId(),
+ template.getId());
templateDataStoreVO.setSize(details.getSize());
templateDataStoreVO.setPhysicalSize(details.getPhysicalSize());
templateDataStoreVO.setDownloadPercent(100);
@@ -576,11 +665,11 @@ public void updateTemplateDetails(SystemVMTemplateDetails details) {
templateDataStoreVO.setState(ObjectInDataStoreStateMachine.State.Ready);
boolean updated = templateDataStoreDao.update(templateDataStoreVO.getId(), templateDataStoreVO);
if (!updated) {
- throw new CloudRuntimeException("Failed to update template_store_ref entry for registered systemVM Template");
+ throw new CloudRuntimeException("Failed to update template-store record for registered system VM Template");
}
}
- public void updateSeededTemplateDetails(long templateId, long storeId, long size, long physicalSize) {
+ protected void updateSeededTemplateDetails(long templateId, long storeId, long size, long physicalSize) {
VMTemplateVO template = vmTemplateDao.findById(templateId);
template.setSize(size);
vmTemplateDao.update(template.getId(), template);
@@ -591,108 +680,75 @@ public void updateSeededTemplateDetails(long templateId, long storeId, long size
templateDataStoreVO.setLastUpdated(new Date(DateUtil.currentGMTTime().getTime()));
boolean updated = templateDataStoreDao.update(templateDataStoreVO.getId(), templateDataStoreVO);
if (!updated) {
- throw new CloudRuntimeException("Failed to update template_store_ref entry for seeded systemVM template");
+ throw new CloudRuntimeException("Failed to update template-store record for seeded system VM Template");
}
}
/**
 * Updates the template id recorded for system VM instances of the given
 * hypervisor type.
 */
protected void updateSystemVMEntries(Long templateId, Hypervisor.HypervisorType hypervisorType) {
    vmInstanceDao.updateSystemVmTemplateId(templateId, hypervisorType);
}
- private void updateSystemVmTemplateGuestOsId() {
- String systemVmGuestOsName = "Debian GNU/Linux 12 (64-bit)"; // default
+ protected void updateHypervisorGuestOsMap() {
try {
- GuestOSVO guestOS = guestOSDao.findOneByDisplayName(systemVmGuestOsName);
- if (guestOS != null) {
- LOGGER.debug("Updating SystemVM Template Guest OS [{}] id", systemVmGuestOsName);
- SystemVmTemplateRegistration.LINUX_12_ID = Math.toIntExact(guestOS.getId());
- hypervisorGuestOsMap.put(Hypervisor.HypervisorType.KVM, LINUX_12_ID);
- hypervisorGuestOsMap.put(Hypervisor.HypervisorType.Hyperv, LINUX_12_ID);
- hypervisorGuestOsMap.put(Hypervisor.HypervisorType.LXC, LINUX_12_ID);
- hypervisorGuestOsMap.put(Hypervisor.HypervisorType.Ovm3, LINUX_12_ID);
+ GuestOSVO guestOS = guestOSDao.findOneByDisplayName(DEFAULT_SYSTEM_VM_GUEST_OS_NAME);
+ if (guestOS == null) {
+ LOGGER.warn("Couldn't find Guest OS by name [{}] to update system VM Template guest OS ID",
+ DEFAULT_SYSTEM_VM_GUEST_OS_NAME);
+ return;
}
+ LOGGER.debug("Updating system VM Template guest OS [{}] ID", DEFAULT_SYSTEM_VM_GUEST_OS_NAME);
+ SystemVmTemplateRegistration.LINUX_12_ID = Math.toIntExact(guestOS.getId());
+ hypervisorGuestOsMap.put(Hypervisor.HypervisorType.KVM, LINUX_12_ID);
+ hypervisorGuestOsMap.put(Hypervisor.HypervisorType.LXC, LINUX_12_ID);
} catch (Exception e) {
- LOGGER.warn("Couldn't update SystemVM Template Guest OS id, due to {}", e.getMessage());
+ LOGGER.warn("Couldn't update System VM template guest OS ID, due to {}", e.getMessage());
}
}
- public void updateConfigurationParams(Map configParams) {
- for (Map.Entry config : configParams.entrySet()) {
- boolean updated = configurationDao.update(config.getKey(), config.getValue());
- if (!updated) {
- throw new CloudRuntimeException(String.format("Failed to update configuration parameter %s", config.getKey()));
- }
+ protected void updateConfigurationParams(Hypervisor.HypervisorType hypervisorType, String templateName, Long zoneId) {
+ String configName = ROUTER_TEMPLATE_CONFIGURATION_NAMES.get(hypervisorType);
+ boolean updated = configurationDao.update(configName, templateName);
+ if (!updated) {
+ throw new CloudRuntimeException(String.format("Failed to update configuration parameter %s", configName));
}
- }
-
- private static Pair readTemplatePropertiesSizes(String path) {
- File tmpFile = new File(path);
- Long size = null;
- Long physicalSize = 0L;
- try (FileReader fr = new FileReader(tmpFile); BufferedReader brf = new BufferedReader(fr);) {
- String line = null;
- while ((line = brf.readLine()) != null) {
- if (line.startsWith("size=")) {
- physicalSize = Long.parseLong(line.split("=")[1]);
- } else if (line.startsWith("virtualsize=")) {
- size = Long.parseLong(line.split("=")[1]);
- }
- if (size == null) {
- size = physicalSize;
- }
- }
- } catch (IOException ex) {
- LOGGER.warn("Failed to read from template.properties", ex);
+ if (zoneId != null) {
+ dataCenterDetailsDao.removeDetail(zoneId, configName);
+ }
+ updated = configurationDao.update(MINIMUM_SYSTEM_VM_VERSION_KEY, getSystemVmTemplateVersion());
+ if (!updated) {
+ throw new CloudRuntimeException(String.format("Failed to update configuration parameter %s", configName));
+ }
+ if (zoneId != null) {
+ dataCenterDetailsDao.removeDetail(zoneId, MINIMUM_SYSTEM_VM_VERSION_KEY);
}
- return new Pair<>(size, physicalSize);
- }
-
- public static void readTemplateProperties(String path, SystemVMTemplateDetails details) {
- Pair templateSizes = readTemplatePropertiesSizes(path);
- details.setSize(templateSizes.first());
- details.setPhysicalSize(templateSizes.second());
}
- private void updateTemplateTablesOnFailure(long templateId) {
+ protected void updateTemplateEntriesOnFailure(long templateId) {
VMTemplateVO template = vmTemplateDao.createForUpdate(templateId);
template.setState(VirtualMachineTemplate.State.Inactive);
vmTemplateDao.update(template.getId(), template);
vmTemplateDao.remove(templateId);
- TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByTemplate(template.getId(), DataStoreRole.Image);
- templateDataStoreDao.remove(templateDataStoreVO.getId());
- }
-
- public static void unmountStore(String filePath) {
- try {
- LOGGER.info("Unmounting store");
- String umountCmd = String.format(UMOUNT_COMMAND, filePath);
- Script.runSimpleBashScript(umountCmd);
- try {
- Files.deleteIfExists(Paths.get(filePath));
- } catch (IOException e) {
- LOGGER.error(String.format("Failed to cleanup mounted store at: %s", filePath), e);
- }
- } catch (Exception e) {
- String msg = String.format("Failed to unmount store mounted at %s", filePath);
- LOGGER.error(msg, e);
- throw new CloudRuntimeException(msg, e);
+ TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByTemplate(template.getId(),
+ DataStoreRole.Image);
+ if (templateDataStoreVO == null) {
+ return;
}
+ templateDataStoreDao.remove(templateDataStoreVO.getId());
}
- private void setupTemplate(String templateName, Hypervisor.HypervisorType hypervisor, CPU.CPUArch arch,
- String destTempFolder) throws CloudRuntimeException {
- String setupTmpltScript = Script.findScript(storageScriptsDir, "setup-sysvm-tmplt");
+ protected void setupTemplateOnStore(String templateName, MetadataTemplateDetails templateDetails,
+ String destTempFolder) throws CloudRuntimeException {
+ String setupTmpltScript = Script.findScript(STORAGE_SCRIPTS_DIR, "setup-sysvm-tmplt");
if (setupTmpltScript == null) {
- throw new CloudRuntimeException("Unable to find the createtmplt.sh");
+ throw new CloudRuntimeException("Unable to find the setup-sysvm-tmplt script");
}
Script scr = new Script(setupTmpltScript, SCRIPT_TIMEOUT, LOGGER);
scr.add("-u", templateName);
- MetadataTemplateDetails templateDetails = NewTemplateMap.get(getHypervisorArchKey(hypervisor, arch));
String filePath = StringUtils.isNotBlank(templateDetails.getDownloadedFilePath()) ?
templateDetails.getDownloadedFilePath() :
templateDetails.getDefaultFilePath();
scr.add("-f", filePath);
- scr.add("-h", hypervisor.name().toLowerCase(Locale.ROOT));
+ scr.add("-h", templateDetails.getHypervisorType().name().toLowerCase(Locale.ROOT));
scr.add("-d", destTempFolder);
String result = scr.execute();
if (result != null) {
@@ -702,17 +758,33 @@ private void setupTemplate(String templateName, Hypervisor.HypervisorType hyperv
}
}
- private Long performTemplateRegistrationOperations(Hypervisor.HypervisorType hypervisor,
- String name, CPU.CPUArch arch, String url, String checksum, ImageFormat format, long guestOsId,
- Long storeId, Long templateId, String filePath, TemplateDataStoreVO templateDataStoreVO) {
+ /**
+ * Register or update a system VM Template record and seed it on the target store.
+ *
+ * @param name display name of the template
+ * @param templateDetails metadata for the template
+ * @param url download URL of the template
+ * @param checksum expected checksum of the template file
+ * @param format image format of the template
+ * @param guestOsId guest OS id
+ * @param storeId target image store id
+ * @param templateId existing template id if present, otherwise {@code null}
+ * @param filePath temporary mount path for the store
+ * @param templateDataStoreVO existing template-store mapping; may be {@code null}
+ * @return the id of the template that was created or updated
+ */
+ protected Long performTemplateRegistrationOperations(String name, MetadataTemplateDetails templateDetails,
+ String url, String checksum, ImageFormat format, long guestOsId, Long storeId, Long templateId,
+ String filePath, TemplateDataStoreVO templateDataStoreVO) {
String templateName = UUID.randomUUID().toString();
Date created = new Date(DateUtil.currentGMTTime().getTime());
- SystemVMTemplateDetails details = new SystemVMTemplateDetails(templateName, name, created,
- url, checksum, format, (int) guestOsId, hypervisor, arch, storeId);
+ SystemVMTemplateDetails details = new SystemVMTemplateDetails(templateName, name, created, url, checksum,
+ format, (int) guestOsId, templateDetails.getHypervisorType(), templateDetails.getArch(), storeId);
if (templateId == null) {
VMTemplateVO template = createTemplateObjectInDB(details);
if (template == null) {
- throw new CloudRuntimeException(String.format("Failed to register Template for hypervisor: %s", hypervisor.name()));
+ throw new CloudRuntimeException(String.format("Failed to register Template for hypervisor: %s",
+ templateDetails.getHypervisorType().name()));
}
templateId = template.getId();
}
@@ -721,153 +793,126 @@ private Long performTemplateRegistrationOperations(Hypervisor.HypervisorType hyp
details.setId(templateId);
String destTempFolderName = String.valueOf(templateId);
String destTempFolder = filePath + PARTIAL_TEMPLATE_FOLDER + destTempFolderName;
- details.setInstallPath(PARTIAL_TEMPLATE_FOLDER + destTempFolderName + File.separator + templateName + "." + hypervisorImageFormat.get(hypervisor).getFileExtension());
+ details.setInstallPath(String.format("%s%s%s%s.%s", PARTIAL_TEMPLATE_FOLDER, destTempFolderName,
+ File.separator, templateName,
+ HYPERVISOR_IMAGE_FORMAT_MAP.get(templateDetails.getHypervisorType()).getFileExtension()));
if (templateDataStoreVO == null) {
createTemplateStoreRefEntry(details);
}
- setupTemplate(templateName, hypervisor, arch, destTempFolder);
+ setupTemplateOnStore(templateName, templateDetails, destTempFolder);
readTemplateProperties(destTempFolder + "/template.properties", details);
details.setUpdated(new Date(DateUtil.currentGMTTime().getTime()));
updateTemplateDetails(details);
return templateId;
}
/**
 * Adds an existing system VM Template to a secondary image store and updates
 * the related DB entries. On failure the partial-template folder on the store
 * is cleaned up before the exception is rethrown.
 *
 * @param templateVO          the existing VM template (must not be null)
 * @param templateDetails     the metadata details of the template to be added
 * @param templateDataStoreVO optional existing template-store mapping; may be null
 * @param zoneId              zone id where the operation is performed
 * @param storeId             target image store id
 * @param filePath            temporary mount path for the store
 * @throws CloudRuntimeException on failure, after store cleanup
 */
protected void addExistingTemplateToStore(VMTemplateVO templateVO, MetadataTemplateDetails templateDetails,
        TemplateDataStoreVO templateDataStoreVO, long zoneId, Long storeId, String filePath) {
    try {
        performTemplateRegistrationOperations(templateVO.getName(), templateDetails, templateVO.getUrl(),
                templateVO.getChecksum(), templateVO.getFormat(), templateVO.getGuestOSId(), storeId,
                templateVO.getId(), filePath, templateDataStoreVO);
    } catch (Exception e) {
        String errMsg = String.format("Failed to add %s to store ID: %d, zone ID: %d", templateVO, storeId, zoneId);
        LOGGER.error(errMsg, e);
        cleanupStore(templateVO.getId(), filePath);
        throw new CloudRuntimeException(errMsg, e);
    }
}
- public void registerTemplateForNonExistingEntries(Hypervisor.HypervisorType hypervisor, CPU.CPUArch arch,
- String name, Pair storeUrlAndId, String filePath) {
+ /**
+ * Registers a new system VM Template for the given hypervisor/arch when no existing template is present.
+ *
+ * @param name the name of the new template
+ * @param templateDetails the metadata details of the template to be registered
+ * @param zoneId the zone id for which the new template should be seeded
+ * @param storeId the store id on which the new template will be seeded
+ * @param filePath temporary mount path for the store
+ * @throws CloudRuntimeException on failure; the method attempts rollback/cleanup
+ */
+ protected void registerNewTemplate(String name, MetadataTemplateDetails templateDetails, long zoneId, Long storeId,
+ String filePath) {
Long templateId = null;
+ Hypervisor.HypervisorType hypervisor = templateDetails.getHypervisorType();
try {
- MetadataTemplateDetails templateDetails = getMetadataTemplateDetails(hypervisor, arch);
- templateId = performTemplateRegistrationOperations(hypervisor, name,
- templateDetails.getArch(), templateDetails.getUrl(),
- templateDetails.getChecksum(), hypervisorImageFormat.get(hypervisor),
- hypervisorGuestOsMap.get(hypervisor), storeUrlAndId.second(), null, filePath, null);
- Map configParams = new HashMap<>();
- configParams.put(RouterTemplateConfigurationNames.get(hypervisor), templateDetails.getName());
- configParams.put("minreq.sysvmtemplate.version", getSystemVmTemplateVersion());
- updateConfigurationParams(configParams);
+ templateId = performTemplateRegistrationOperations(name, templateDetails, templateDetails.getUrl(),
+ templateDetails.getChecksum(), HYPERVISOR_IMAGE_FORMAT_MAP.get(hypervisor),
+ hypervisorGuestOsMap.get(hypervisor), storeId, null, filePath, null);
+ updateConfigurationParams(hypervisor, name, zoneId);
updateSystemVMEntries(templateId, hypervisor);
} catch (Exception e) {
String errMsg = String.format("Failed to register Template for hypervisor: %s", hypervisor);
LOGGER.error(errMsg, e);
if (templateId != null) {
- updateTemplateTablesOnFailure(templateId);
+ updateTemplateEntriesOnFailure(templateId);
cleanupStore(templateId, filePath);
}
throw new CloudRuntimeException(errMsg, e);
}
}
- protected void validateTemplateFileForHypervisorAndArch(Hypervisor.HypervisorType hypervisor, CPU.CPUArch arch) {
+ /**
+ * Validate presence and integrity of metadata and local template file for the given hypervisor/arch.
+ *
+ * @param hypervisor target hypervisor type
+ * @param arch target CPU architecture
+ * @return validated MetadataTemplateDetails
+ * @throws CloudRuntimeException if template is not available, missing, or checksum validation fails
+ */
+ protected MetadataTemplateDetails getValidatedTemplateDetailsForHypervisorAndArch(
+ Hypervisor.HypervisorType hypervisor, CPU.CPUArch arch) {
+ if (!AVAILABLE_SYSTEM_TEMPLATES_HYPERVISOR_ARCH_LIST.contains(new Pair<>(hypervisor, arch))) {
+ throw new CloudRuntimeException("No system VM Template available for the given hypervisor and arch");
+ }
MetadataTemplateDetails templateDetails = getMetadataTemplateDetails(hypervisor, arch);
+ if (templateDetails == null) {
+ throw new CloudRuntimeException("No template details found for the given hypervisor and arch");
+ }
File templateFile = getTemplateFile(templateDetails);
if (templateFile == null) {
throw new CloudRuntimeException("Failed to find local template file");
}
- if (isTemplateFileChecksumDifferent(templateDetails, templateFile)) {
+ if (templateDetails.isFileChecksumDifferent(templateFile)) {
throw new CloudRuntimeException("Checksum failed for local template file");
}
- }
-
- public void validateAndRegisterTemplate(Hypervisor.HypervisorType hypervisor, String name, Long storeId,
- VMTemplateVO templateVO, TemplateDataStoreVO templateDataStoreVO, String filePath) {
- validateTemplateFileForHypervisorAndArch(hypervisor, templateVO.getArch());
- registerTemplate(hypervisor, name, storeId, templateVO, templateDataStoreVO, filePath);
- }
-
- public void validateAndRegisterTemplateForNonExistingEntries(Hypervisor.HypervisorType hypervisor,
- CPU.CPUArch arch, String name, Pair storeUrlAndId, String filePath) {
- validateTemplateFileForHypervisorAndArch(hypervisor, arch);
- registerTemplateForNonExistingEntries(hypervisor, arch, name, storeUrlAndId, filePath);
- }
-
- protected static String getMetadataFilePath() {
- return METADATA_FILE;
+ return templateDetails;
}
/**
- * This method parses the metadata file consisting of the systemVM templates information
- * @return the version of the systemvm template that is to be used. This is done in order
- * to fallback on the latest available version of the systemVM template when there doesn't
- * exist a template corresponding to the current code version.
+ * Return the local template file. Downloads it if not present locally and url is present.
+ *
+ * @param templateDetails template metadata; may set `downloadedFilePath`
+ * @return the template {@code File} on disk, or {@code null} if not found/downloaded
*/
- public static String parseMetadataFile() {
- String metadataFilePath = getMetadataFilePath();
- String errMsg = String.format("Failed to parse systemVM Template metadata file: %s", metadataFilePath);
- final Ini ini = new Ini();
- try (FileReader reader = new FileReader(metadataFilePath)) {
- ini.load(reader);
- } catch (IOException e) {
- LOGGER.error(errMsg, e);
- throw new CloudRuntimeException(errMsg, e);
- }
- if (!ini.containsKey("default")) {
- errMsg = String.format("%s as unable to default section", errMsg);
- LOGGER.error(errMsg);
- throw new CloudRuntimeException(errMsg);
- }
- for (Pair hypervisorType : hypervisorList) {
- String key = getHypervisorArchKey(hypervisorType.first(), hypervisorType.second());
- Ini.Section section = ini.get(key);
- if (section == null) {
- LOGGER.error("Failed to find details for {} in template metadata file: {}",
- key, metadataFilePath);
- continue;
- }
- NewTemplateMap.put(key, new MetadataTemplateDetails(
- hypervisorType.first(),
- section.get("templatename"),
- section.get("filename"),
- section.get("downloadurl"),
- section.get("checksum"),
- hypervisorType.second(),
- section.get("guestos")));
- }
- Ini.Section defaultSection = ini.get("default");
- return defaultSection.get("version").trim();
- }
-
-
- private static void cleanupStore(Long templateId, String filePath) {
- String destTempFolder = filePath + PARTIAL_TEMPLATE_FOLDER + String.valueOf(templateId);
- try {
- Files.deleteIfExists(Paths.get(destTempFolder));
- } catch (IOException e) {
- LOGGER.error(String.format("Failed to cleanup mounted store at: %s", filePath), e);
- }
- }
-
protected File getTemplateFile(MetadataTemplateDetails templateDetails) {
File templateFile = new File(templateDetails.getDefaultFilePath());
if (templateFile.exists()) {
return templateFile;
}
LOGGER.debug("{} is not present", templateFile.getAbsolutePath());
- if (DOWNLOADABLE_TEMPLATE_ARCH_TYPES.contains(templateDetails.getArch()) &&
- StringUtils.isNotBlank(templateDetails.getUrl())) {
+ if (StringUtils.isNotBlank(templateDetails.getUrl())) {
LOGGER.debug("Downloading the template file {} for {}",
templateDetails.getUrl(), templateDetails.getHypervisorArchLog());
Path path = Path.of(TEMPLATES_PATH);
if (!Files.isWritable(path)) {
- templateFile = new File(tempDownloadDir, templateDetails.getFilename());
+ templateFile = new File(getTempDownloadDir(), templateDetails.getFilename());
}
if (!templateFile.exists() &&
!HttpUtils.downloadFileWithProgress(templateDetails.getUrl(), templateFile.getAbsolutePath(),
LOGGER)) {
+ LOGGER.error("Failed to download template for {} using url: {}",
+ templateDetails.getHypervisorArchLog(), templateDetails.getUrl());
return null;
}
templateDetails.setDownloadedFilePath(templateFile.getAbsolutePath());
@@ -875,32 +920,27 @@ protected File getTemplateFile(MetadataTemplateDetails templateDetails) {
return templateFile;
}
- protected boolean isTemplateFileChecksumDifferent(MetadataTemplateDetails templateDetails, File templateFile) {
- String templateChecksum = DigestHelper.calculateChecksum(templateFile);
- if (!templateChecksum.equals(templateDetails.getChecksum())) {
- LOGGER.error("Checksum {} for file {} does not match checksum {} from metadata",
- templateChecksum, templateFile, templateDetails.getChecksum());
- return true;
- }
- return false;
- }
-
- protected void validateTemplates(List> hypervisorsArchInUse) {
+ /**
+ * Validate templates for the provided hypervisor/architecture pairs that are in use.
+ * If a template is missing or validation fails for any required pair, a
+ * {@link CloudRuntimeException} is thrown to abort the upgrade. If system VM Template for a hypervisor/arch is
+ * not considered available then validation is skipped for that pair.
+ *
+ * @param hypervisorArchList list of hypervisor/architecture pairs to validate
+ */
+ protected void validateTemplates(List> hypervisorArchList) {
boolean templatesFound = true;
- for (Pair hypervisorArch : hypervisorsArchInUse) {
- MetadataTemplateDetails matchedTemplate = getMetadataTemplateDetails(hypervisorArch.first(),
- hypervisorArch.second());
- if (matchedTemplate == null) {
- templatesFound = false;
- break;
- }
- File tempFile = getTemplateFile(matchedTemplate);
- if (tempFile == null) {
- LOGGER.warn("Failed to download template for {}, moving ahead",
- matchedTemplate.getHypervisorArchLog());
+ for (Pair hypervisorArch : hypervisorArchList) {
+ if (!AVAILABLE_SYSTEM_TEMPLATES_HYPERVISOR_ARCH_LIST.contains(hypervisorArch)) {
+ LOGGER.info("No system VM Template available for {}. Skipping validation.",
+ getHypervisorArchLog(hypervisorArch.first(), hypervisorArch.second()));
continue;
}
- if (isTemplateFileChecksumDifferent(matchedTemplate, tempFile)) {
+ try {
+ getValidatedTemplateDetailsForHypervisorAndArch(hypervisorArch.first(), hypervisorArch.second());
+ } catch (CloudRuntimeException e) {
+ LOGGER.error("Validation failed for {}: {}",
+ getHypervisorArchLog(hypervisorArch.first(), hypervisorArch.second()), e.getMessage());
templatesFound = false;
break;
}
@@ -912,10 +952,19 @@ protected void validateTemplates(List storeUrlAndId = getNfsStoreInZone(zoneId);
String nfsVersion = getNfsVersion(storeUrlAndId.second());
- mountStore(storeUrlAndId.first(), filePath, nfsVersion);
+ mountStore(storeUrlAndId.first(), storeMountPath, nfsVersion);
List> hypervisorArchList =
clusterDao.listDistinctHypervisorsAndArchExcludingExternalType(zoneId);
for (Pair hypervisorArch : hypervisorArchList) {
@@ -925,7 +974,8 @@ protected void registerTemplatesForZone(long zoneId, String filePath) {
if (templateDetails == null) {
continue;
}
- VMTemplateVO templateVO = getRegisteredTemplate(templateDetails.getName(), templateDetails.getArch());
+ VMTemplateVO templateVO = getRegisteredTemplate(templateDetails.getName(),
+ templateDetails.getHypervisorType(), templateDetails.getArch(), templateDetails.getUrl());
if (templateVO != null) {
TemplateDataStoreVO templateDataStoreVO =
templateDataStoreDao.findByStoreTemplate(storeUrlAndId.second(), templateVO.getId());
@@ -935,22 +985,22 @@ protected void registerTemplatesForZone(long zoneId, String filePath) {
continue;
}
}
- registerTemplate(hypervisorType, templateDetails.getName(), storeUrlAndId.second(), templateVO,
- templateDataStoreVO, filePath);
- updateRegisteredTemplateDetails(templateVO.getId(), templateDetails);
+ addExistingTemplateToStore(templateVO, templateDetails, templateDataStoreVO, zoneId,
+ storeUrlAndId.second(), storeMountPath);
+ updateRegisteredTemplateDetails(templateVO.getId(), templateDetails, zoneId);
continue;
}
- registerTemplateForNonExistingEntries(hypervisorType, templateDetails.getArch(), templateDetails.getName(),
- storeUrlAndId, filePath);
+ registerNewTemplate(templateDetails.getName(), templateDetails, zoneId, storeUrlAndId.second(),
+ storeMountPath);
}
}
- public void registerTemplates(List> hypervisorsArchInUse) {
+ protected void registerTemplates(List> hypervisorsArchInUse) {
GlobalLock lock = GlobalLock.getInternLock("UpgradeDatabase-Lock");
try {
LOGGER.info("Grabbing lock to register Templates.");
if (!lock.lock(LOCK_WAIT_TIMEOUT)) {
- throw new CloudRuntimeException("Unable to acquire lock to register SystemVM Template.");
+ throw new CloudRuntimeException("Unable to acquire lock to register system VM Template.");
}
try {
validateTemplates(hypervisorsArchInUse);
@@ -970,13 +1020,13 @@ public void doInTransactionWithoutResult(final TransactionStatus status) {
unmountStore(filePath);
} catch (Exception e) {
unmountStore(filePath);
- throw new CloudRuntimeException("Failed to register SystemVM Template. Upgrade failed");
+ throw new CloudRuntimeException("Failed to register system VM Template. Upgrade failed");
}
}
}
});
} catch (Exception e) {
- throw new CloudRuntimeException("Failed to register SystemVM Template. Upgrade failed");
+ throw new CloudRuntimeException("Failed to register system VM Template. Upgrade failed");
}
} finally {
lock.unlock();
@@ -984,7 +1034,18 @@ public void doInTransactionWithoutResult(final TransactionStatus status) {
}
}
- private void updateRegisteredTemplateDetails(Long templateId, MetadataTemplateDetails templateDetails) {
+ /**
+ * Update the DB record for an existing template to mark it as a system template,
+ * set the guest OS (if resolvable), and propagate the change to system VM entries
+ * and related configuration for the template's hypervisor.
+ *
+ * @param templateId id of the template to update
+ * @param templateDetails metadata used to update the template record
+ * @param zoneId zone id whose per-zone details (if any) should be cleared; may be null
+ * @throws CloudRuntimeException if updating the template record fails
+ */
+ protected void updateRegisteredTemplateDetails(Long templateId, MetadataTemplateDetails templateDetails,
+ Long zoneId) {
VMTemplateVO templateVO = vmTemplateDao.findById(templateId);
templateVO.setTemplateType(Storage.TemplateType.SYSTEM);
GuestOSVO guestOS = guestOSDao.findOneByDisplayName(templateDetails.getGuestOs());
@@ -993,20 +1054,18 @@ private void updateRegisteredTemplateDetails(Long templateId, MetadataTemplateDe
}
boolean updated = vmTemplateDao.update(templateVO.getId(), templateVO);
if (!updated) {
- String errMsg = String.format("updateSystemVmTemplates:Exception while updating Template with id %s to be marked as 'system'", templateId);
+ String errMsg = String.format("Exception while updating template with id %s to be marked as 'system'",
+ templateId);
LOGGER.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
Hypervisor.HypervisorType hypervisorType = templateDetails.getHypervisorType();
updateSystemVMEntries(templateId, hypervisorType);
- // Change value of global configuration parameter router.template.* for the corresponding hypervisor and minreq.sysvmtemplate.version for the ACS version
- Map configParams = new HashMap<>();
- configParams.put(RouterTemplateConfigurationNames.get(hypervisorType), templateDetails.getName());
- configParams.put("minreq.sysvmtemplate.version", getSystemVmTemplateVersion());
- updateConfigurationParams(configParams);
+ updateConfigurationParams(hypervisorType, templateDetails.getName(), zoneId);
}
- private void updateTemplateUrlChecksumAndGuestOsId(VMTemplateVO templateVO, MetadataTemplateDetails templateDetails) {
+ protected void updateTemplateUrlChecksumAndGuestOsId(VMTemplateVO templateVO,
+ MetadataTemplateDetails templateDetails) {
templateVO.setUrl(templateDetails.getUrl());
templateVO.setChecksum(templateDetails.getChecksum());
GuestOSVO guestOS = guestOSDao.findOneByDisplayName(templateDetails.getGuestOs());
@@ -1015,51 +1074,192 @@ private void updateTemplateUrlChecksumAndGuestOsId(VMTemplateVO templateVO, Meta
}
boolean updated = vmTemplateDao.update(templateVO.getId(), templateVO);
if (!updated) {
- String errMsg = String.format("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type %s", templateDetails.getHypervisorType());
+ String errMsg = String.format("Exception while updating 'url' and 'checksum' for hypervisor type %s",
+ templateDetails.getHypervisorType());
LOGGER.error(errMsg);
throw new CloudRuntimeException(errMsg);
}
}
- protected boolean registerOrUpdateSystemVmTemplate(MetadataTemplateDetails templateDetails,
- List> hypervisorsInUse) {
- LOGGER.debug("Updating System VM template for {}", templateDetails.getHypervisorArchLog());
- VMTemplateVO registeredTemplate = getRegisteredTemplate(templateDetails.getName(), templateDetails.getArch());
- // change template type to SYSTEM
+ /**
+ * Updates or registers the system VM Template for the given hypervisor/arch if not already present.
+ * Returns true if a new template was registered.
+ * If there is an existing system VM Template for the given hypervisor/arch, its details are updated.
+ * If no existing template is found, new templates are registered for the hypervisor/arch pairs in use.
+ */
+ protected boolean updateOrRegisterSystemVmTemplate(MetadataTemplateDetails templateDetails,
+ List> hypervisorArchInUse) {
+ String systemVmTemplateLog = String.format("%s system VM Template for %s", getSystemVmTemplateVersion(),
+ templateDetails.getHypervisorArchLog());
+ LOGGER.debug("Registering or updating {}", systemVmTemplateLog);
+ VMTemplateVO registeredTemplate = getRegisteredTemplate(templateDetails.getName(),
+ templateDetails.getHypervisorType(), templateDetails.getArch(), templateDetails.getUrl());
if (registeredTemplate != null) {
- updateRegisteredTemplateDetails(registeredTemplate.getId(), templateDetails);
- } else {
- boolean isHypervisorArchMatchMetadata = hypervisorsInUse.stream()
- .anyMatch(p -> p.first().equals(templateDetails.getHypervisorType())
- && Objects.equals(p.second(), templateDetails.getArch()));
- if (isHypervisorArchMatchMetadata) {
- try {
- registerTemplates(hypervisorsInUse);
- return true;
- } catch (final Exception e) {
- throw new CloudRuntimeException(String.format("Failed to register %s templates for hypervisors: [%s]. " +
- "Cannot upgrade system VMs",
- getSystemVmTemplateVersion(),
- StringUtils.join(hypervisorsInUse.stream()
- .map(x -> getHypervisorArchKey(x.first(), x.second()))
- .collect(Collectors.toList()), ",")), e);
- }
- } else {
- LOGGER.warn("Cannot upgrade {} system VM template for {} as it is not used, not failing upgrade",
- getSystemVmTemplateVersion(), templateDetails.getHypervisorArchLog());
- VMTemplateVO templateVO = vmTemplateDao.findLatestTemplateByTypeAndHypervisorAndArch(
- templateDetails.getHypervisorType(), templateDetails.getArch(), Storage.TemplateType.SYSTEM);
- if (templateVO != null) {
- updateTemplateUrlChecksumAndGuestOsId(templateVO, templateDetails);
- }
+ LOGGER.info("{} is already registered as {}, updating details",
+ systemVmTemplateLog, registeredTemplate);
+ updateRegisteredTemplateDetails(registeredTemplate.getId(), templateDetails, null);
+ return false;
+ }
+ boolean isHypervisorArchMatchMetadata = hypervisorArchInUse.stream()
+ .anyMatch(p -> p.first().equals(templateDetails.getHypervisorType())
+ && Objects.equals(p.second(), templateDetails.getArch()));
+ if (!isHypervisorArchMatchMetadata) {
+ LOGGER.warn("Skipping upgrading {} system VM Template for {} as it is not used, not failing upgrade",
+ getSystemVmTemplateVersion(), templateDetails.getHypervisorArchLog());
+ VMTemplateVO templateVO = vmTemplateDao.findLatestTemplateByTypeAndHypervisorAndArch(
+ templateDetails.getHypervisorType(), templateDetails.getArch(), Storage.TemplateType.SYSTEM);
+ if (templateVO != null) {
+ updateTemplateUrlChecksumAndGuestOsId(templateVO, templateDetails);
+ }
+ return false;
+ }
+ try {
+ registerTemplates(hypervisorArchInUse);
+ return true;
+ } catch (final Exception e) {
+ throw new CloudRuntimeException(String.format("Failed to register %s templates for hypervisors: [%s]. " +
+ "Cannot upgrade system VMs",
+ getSystemVmTemplateVersion(),
+ StringUtils.join(hypervisorArchInUse.stream()
+ .map(x -> String.format("%s-%s", x.first().name(), x.second().name()))
+ .collect(Collectors.toList()), ",")), e);
+ }
+ }
+
+ /**
+ * Return NFS version for the store: store-specific config if present
+ * or global config if absent. Returns null if not set.
+ */
+ protected String getNfsVersion(long storeId) {
+ final String configKey = "secstorage.nfs.version";
+ final Map storeDetails = imageStoreDetailsDao.getDetails(storeId);
+ if (storeDetails != null && storeDetails.containsKey(configKey)) {
+ return storeDetails.get(configKey);
+ }
+ ConfigurationVO globalNfsVersion = configurationDao.findByName(configKey);
+ if (globalNfsVersion != null) {
+ return globalNfsVersion.getValue();
+ }
+ return null;
+ }
+
+ /**
+ * Validate metadata for the given template's hypervisor/arch and add the existing template
+ * to the specified secondary store. On success, database entries are created/updated.
+ *
+ * @param templateVO template to add
+ * @param templateDataStoreVO existing template-store mapping; may be null
+ * @param zoneId zone id where the operation is performed
+ * @param storeId target image store id
+ * @param filePath temporary mount path for the store
+ * @throws CloudRuntimeException on failure; the method attempts rollback/cleanup
+ */
+ public void validateAndAddTemplateToStore(VMTemplateVO templateVO, TemplateDataStoreVO templateDataStoreVO,
+ long zoneId, long storeId, String filePath) {
+ MetadataTemplateDetails templateDetails = getValidatedTemplateDetailsForHypervisorAndArch(
+ templateVO.getHypervisorType(), templateVO.getArch());
+ addExistingTemplateToStore(templateVO, templateDetails, templateDataStoreVO, zoneId, storeId, filePath);
+ }
+
+ /**
+ * Validate metadata for the given hypervisor/arch and register a new system VM Template
+ * on the specified store and zone. Creates DB entries and seeds the template on the store.
+ *
+ * @param hypervisor hypervisor type
+ * @param arch cpu architecture
+ * @param name template name to register
+ * @param zoneId zone id where the operation is performed
+ * @param storeId target image store id
+ * @param filePath temporary mount path for the store
+ * @throws CloudRuntimeException on failure; the method attempts rollback/cleanup
+ */
+ public void validateAndRegisterNewTemplate(Hypervisor.HypervisorType hypervisor, CPU.CPUArch arch, String name,
+ long zoneId, long storeId, String filePath) {
+ MetadataTemplateDetails templateDetails = getValidatedTemplateDetailsForHypervisorAndArch(hypervisor, arch);
+ registerNewTemplate(name, templateDetails, zoneId, storeId, filePath);
+ }
+
+ /**
+ * Check whether the template at the given `path` on NFS `url` is already seeded.
+ * If found, updates DB with sizes and returns true; otherwise returns false.
+ *
+ * @throws CloudRuntimeException on any error
+ */
+ public boolean validateIfSeeded(TemplateDataStoreVO templDataStoreVO, String url, String path, String nfsVersion) {
+ String filePath = null;
+ try {
+ filePath = Files.createTempDirectory(TEMPORARY_SECONDARY_STORE).toString();
+ if (filePath == null) {
+ throw new CloudRuntimeException("Failed to create temporary directory to mount secondary store");
+ }
+ mountStore(url, filePath, nfsVersion);
+ int lastIdx = path.lastIndexOf(File.separator);
+ String partialDirPath = path.substring(0, lastIdx);
+ String templatePath = filePath + File.separator + partialDirPath;
+ File templateProps = new File(templatePath + "/template.properties");
+ if (templateProps.exists()) {
+ Pair templateSizes = readTemplatePropertiesSizes(templatePath + "/template.properties");
+ updateSeededTemplateDetails(templDataStoreVO.getTemplateId(), templDataStoreVO.getDataStoreId(),
+ templateSizes.first(), templateSizes.second());
+ LOGGER.info("System VM template already seeded, skipping registration");
+ return true;
+ }
+ LOGGER.info("System VM template not seeded");
+ return false;
+ } catch (Exception e) {
+ LOGGER.error("Failed to verify if the template is seeded", e);
+ throw new CloudRuntimeException("Failed to verify if the template is seeded", e);
+ } finally {
+ // filePath is null if createTempDirectory itself failed; nothing to unmount or delete then
+ if (filePath != null) {
+ unmountStore(filePath);
+ try {
+ Files.deleteIfExists(Path.of(filePath));
+ } catch (IOException e) {
+ LOGGER.error("Failed to delete temporary directory: {}", filePath, e);
+ }
+ }
}
- return false;
}
+ /**
+ * Finds a registered system VM Template matching the provided criteria.
+ *
+ * The method first attempts to locate the latest template by {@code templateName},
+ * {@code hypervisorType} and {@code arch}. If none is found and a non-blank {@code url}
+ * is provided, it falls back to searching for an active system template by the
+ * URL path segment (the substring after the last '/' in the URL).
+ *
+ * @param templateName the template name to search for
+ * @param hypervisorType the hypervisor type
+ * @param arch the CPU architecture
+ * @param url optional download URL used as a fallback; may be {@code null} or blank
+ * @return the matching {@code VMTemplateVO} if found; {@code null} otherwise
+ */
+ public VMTemplateVO getRegisteredTemplate(String templateName, Hypervisor.HypervisorType hypervisorType,
+ CPU.CPUArch arch, String url) {
+ VMTemplateVO registeredTemplate = vmTemplateDao.findLatestTemplateByName(templateName, hypervisorType, arch);
+ if (registeredTemplate == null && StringUtils.isNotBlank(url)) {
+ String urlPath = url.substring(url.lastIndexOf("/") + 1);
+ LOGGER.debug("No template found by name, falling back to search existing SYSTEM template by " +
+ "urlPath: {}, hypervisor: {}, arch: {}", urlPath, hypervisorType, arch);
+ registeredTemplate = vmTemplateDao.findActiveSystemTemplateByHypervisorArchAndUrlPath(hypervisorType, arch,
+ urlPath);
+ }
+ LOGGER.debug("Found existing registered template for hypervisor: {}, arch: {}: {}", hypervisorType,
+ arch, registeredTemplate);
+ return registeredTemplate;
+ }
+
+ /**
+ * Update or register system VM Templates based on metadata.
+ * Runs the registration logic inside a database transaction: obtains the
+ * set of hypervisors/architectures in use, iterates over metadata entries
+ * and attempts to register or update each template.
+ *
+ * @param conn retained for compatibility with callers (not used directly)
+ */
public void updateSystemVmTemplates(final Connection conn) {
- LOGGER.debug("Updating System Vm template IDs");
- updateSystemVmTemplateGuestOsId();
+ LOGGER.debug("Updating System VM templates");
+ updateHypervisorGuestOsMap();
Transaction.execute(new TransactionCallbackNoReturn() {
@Override
public void doInTransactionWithoutResult(final TransactionStatus status) {
@@ -1069,10 +1269,9 @@ public void doInTransactionWithoutResult(final TransactionStatus status) {
} catch (final Exception e) {
throw new CloudRuntimeException("Exception while getting hypervisor types from clusters", e);
}
- Collection templateEntries = NewTemplateMap.values();
- for (MetadataTemplateDetails templateDetails : templateEntries) {
+ for (MetadataTemplateDetails templateDetails : METADATA_TEMPLATE_LIST) {
try {
- if (registerOrUpdateSystemVmTemplate(templateDetails, hypervisorsInUse)) {
+ if (updateOrRegisterSystemVmTemplate(templateDetails, hypervisorsInUse)) {
break;
}
} catch (final Exception e) {
@@ -1081,24 +1280,11 @@ public void doInTransactionWithoutResult(final TransactionStatus status) {
throw new CloudRuntimeException(errMsg, e);
}
}
- LOGGER.debug("Updating System Vm Template IDs Complete");
+ LOGGER.debug("Updating System VM Templates Complete");
}
});
}
- public String getNfsVersion(long storeId) {
- final String configKey = "secstorage.nfs.version";
- final Map storeDetails = imageStoreDetailsDao.getDetails(storeId);
- if (storeDetails != null && storeDetails.containsKey(configKey)) {
- return storeDetails.get(configKey);
- }
- ConfigurationVO globalNfsVersion = configurationDao.findByName(configKey);
- if (globalNfsVersion != null) {
- return globalNfsVersion.getValue();
- }
- return null;
- }
-
protected static class MetadataTemplateDetails {
private final Hypervisor.HypervisorType hypervisorType;
private final String name;
@@ -1160,6 +1346,16 @@ public String getDefaultFilePath() {
return TEMPLATES_PATH + filename;
}
+ public boolean isFileChecksumDifferent(File file) {
+ String fileChecksum = DigestHelper.calculateChecksum(file);
+ if (!fileChecksum.equals(getChecksum())) {
+ LOGGER.error("Checksum {} for file {} does not match checksum {} from metadata",
+ fileChecksum, file, getChecksum());
+ return true;
+ }
+ return false;
+ }
+
public String getHypervisorArchLog() {
return SystemVmTemplateRegistration.getHypervisorArchLog(hypervisorType, arch);
}
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2214to30.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2214to30.java
index 524b6a34893b..d4cdbcb9707d 100644
--- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2214to30.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2214to30.java
@@ -77,8 +77,6 @@ public void performDataMigration(Connection conn) {
encryptData(conn);
// drop keys
dropKeysIfExist(conn);
- //update template ID for system Vms
- //updateSystemVms(conn); This is not required as system template update is handled during 4.2 upgrade
// update domain network ref
updateDomainNetworkRef(conn);
// update networks that use redundant routers to the new network offering
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade302to40.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade302to40.java
index aa427252585f..bd8ddaa7c498 100644
--- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade302to40.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade302to40.java
@@ -62,7 +62,6 @@ public InputStream[] getPrepareScripts() {
@Override
public void performDataMigration(Connection conn) {
- //updateVmWareSystemVms(conn); This is not required as system template update is handled during 4.2 upgrade
correctVRProviders(conn);
correctMultiplePhysicaNetworkSetups(conn);
addHostDetailsUniqueKey(conn);
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade304to305.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade304to305.java
index 3167dd8115b4..38dc90b460dd 100644
--- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade304to305.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade304to305.java
@@ -65,7 +65,6 @@ public void performDataMigration(Connection conn) {
addVpcProvider(conn);
updateRouterNetworkRef(conn);
fixZoneUsingExternalDevices(conn);
-// updateSystemVms(conn);
fixForeignKeys(conn);
encryptClusterDetails(conn);
}
@@ -81,54 +80,6 @@ public InputStream[] getCleanupScripts() {
return new InputStream[] {script};
}
- private void updateSystemVms(Connection conn) {
- PreparedStatement pstmt = null;
- ResultSet rs = null;
- boolean VMware = false;
- try {
- pstmt = conn.prepareStatement("select distinct(hypervisor_type) from `cloud`.`cluster` where removed is null");
- rs = pstmt.executeQuery();
- while (rs.next()) {
- if ("VMware".equals(rs.getString(1))) {
- VMware = true;
- }
- }
- } catch (SQLException e) {
- throw new CloudRuntimeException("Error while iterating through list of hypervisors in use", e);
- }
- // Just update the VMware system template. Other hypervisor templates are unchanged from previous 3.0.x versions.
- logger.debug("Updating VMware System Vms");
- try {
- //Get 3.0.5 VMware system Vm template Id
- pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = 'systemvm-vmware-3.0.5' and removed is null");
- rs = pstmt.executeQuery();
- if (rs.next()) {
- long templateId = rs.getLong(1);
- rs.close();
- pstmt.close();
- // change template type to SYSTEM
- pstmt = conn.prepareStatement("update `cloud`.`vm_template` set type='SYSTEM' where id = ?");
- pstmt.setLong(1, templateId);
- pstmt.executeUpdate();
- pstmt.close();
- // update template ID of system Vms
- pstmt = conn.prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = 'VMware'");
- pstmt.setLong(1, templateId);
- pstmt.executeUpdate();
- pstmt.close();
- } else {
- if (VMware) {
- throw new CloudRuntimeException("3.0.5 VMware SystemVm Template not found. Cannot upgrade system Vms");
- } else {
- logger.warn("3.0.5 VMware SystemVm Template not found. VMware hypervisor is not used, so not failing upgrade");
- }
- }
- } catch (SQLException e) {
- throw new CloudRuntimeException("Error while updating VMware systemVM Template", e);
- }
- logger.debug("Updating System VM Template IDs Complete");
- }
-
private void addVpcProvider(Connection conn) {
//Encrypt config params and change category to Hidden
logger.debug("Adding VPC provider to all physical Networks in the system");
diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade410to420.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade410to420.java
index 94e6149e73b2..a66aa69798aa 100644
--- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade410to420.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade410to420.java
@@ -45,6 +45,10 @@
public class Upgrade410to420 extends DbUpgradeAbstractImpl {
+ public static final String UNABLE_TO_PERSIST_VSWITCH_CONFIGURATION_OF_VMWARE_CLUSTERS = "Unable to persist vswitch configuration of VMware clusters.";
+ public static final String INSERT_MODIFIED_ROWS = "Insert modified {} rows";
+ public static final String UPDATE_MODIFIED_ROWS = "Update modified {} rows";
+
@Override
public String[] getUpgradableVersionRange() {
return new String[] {"4.1.0", "4.2.0"};
@@ -55,11 +59,6 @@ public String getUpgradedVersion() {
return "4.2.0";
}
- @Override
- public boolean supportsRollingUpgrade() {
- return false;
- }
-
@Override
public InputStream[] getPrepareScripts() {
final String scriptFile = "META-INF/db/schema-410to420.sql";
@@ -117,12 +116,12 @@ public void performDataMigration(Connection conn) {
private void createFullCloneFlag(Connection conn) {
String update_sql;
int numRows = 0;
- try (PreparedStatement delete = conn.prepareStatement("delete from `cloud`.`configuration` where name='vmware.create.full.clone';");)
+ try (PreparedStatement delete = conn.prepareStatement("delete from `cloud`.`configuration` where name='vmware.create.full.clone';"))
{
delete.executeUpdate();
- try(PreparedStatement query = conn.prepareStatement("select count(*) from `cloud`.`data_center`");)
+ try(PreparedStatement query = conn.prepareStatement("select count(*) from `cloud`.`data_center`"))
{
- try(ResultSet rs = query.executeQuery();) {
+ try(ResultSet rs = query.executeQuery()) {
if (rs.next()) {
numRows = rs.getInt(1);
}
@@ -131,7 +130,7 @@ private void createFullCloneFlag(Connection conn) {
} else {
update_sql = "insert into `cloud`.`configuration` (`category`, `instance`, `component`, `name`, `value`, `description`) VALUES ('Advanced', 'DEFAULT', 'UserVmManager', 'vmware.create.full.clone' , 'true', 'If set to true, creates VMs as full clones on ESX hypervisor');";
}
- try(PreparedStatement update_pstmt = conn.prepareStatement(update_sql);) {
+ try(PreparedStatement update_pstmt = conn.prepareStatement(update_sql)) {
update_pstmt.executeUpdate();
}catch (SQLException e) {
throw new CloudRuntimeException("Failed to set global flag vmware.create.full.clone: ", e);
@@ -148,7 +147,7 @@ private void createFullCloneFlag(Connection conn) {
}
private void migrateVolumeOnSecondaryStorage(Connection conn) {
- try (PreparedStatement sql = conn.prepareStatement("update `cloud`.`volumes` set state='Uploaded' where state='UploadOp'");){
+ try (PreparedStatement sql = conn.prepareStatement("update `cloud`.`volumes` set state='Uploaded' where state='UploadOp'")){
sql.executeUpdate();
} catch (SQLException e) {
throw new CloudRuntimeException("Failed to upgrade volume state: ", e);
@@ -156,7 +155,7 @@ private void migrateVolumeOnSecondaryStorage(Connection conn) {
}
private void persistVswitchConfiguration(Connection conn) {
- Long clusterId;
+ long clusterId;
String clusterHypervisorType;
final String NEXUS_GLOBAL_CONFIG_PARAM_NAME = "vmware.use.nexus.vswitch";
final String DVS_GLOBAL_CONFIG_PARAM_NAME = "vmware.use.dvswitch";
@@ -168,10 +167,10 @@ private void persistVswitchConfiguration(Connection conn) {
boolean nexusEnabled = false;
String publicVswitchType = VMWARE_STANDARD_VSWITCH;
String guestVswitchType = VMWARE_STANDARD_VSWITCH;
- Map>> detailsMap = new HashMap>>();
+ Map>> detailsMap = new HashMap<>();
List> detailsList;
- try (PreparedStatement clustersQuery = conn.prepareStatement("select id, hypervisor_type from `cloud`.`cluster` where removed is NULL");){
- try(ResultSet clusters = clustersQuery.executeQuery();) {
+ try (PreparedStatement clustersQuery = conn.prepareStatement("select id, hypervisor_type from `cloud`.`cluster` where removed is NULL")){
+ try(ResultSet clusters = clustersQuery.executeQuery()) {
while (clusters.next()) {
clusterHypervisorType = clusters.getString("hypervisor_type");
clusterId = clusters.getLong("id");
@@ -186,20 +185,19 @@ private void persistVswitchConfiguration(Connection conn) {
publicVswitchType = NEXUS_1000V_DVSWITCH;
guestVswitchType = NEXUS_1000V_DVSWITCH;
}
- detailsList = new ArrayList>();
- detailsList.add(new Pair(ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC, guestVswitchType));
- detailsList.add(new Pair(ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC, publicVswitchType));
+ detailsList = new ArrayList<>();
+ detailsList.add(new Pair<>(ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC, guestVswitchType));
+ detailsList.add(new Pair<>(ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC, publicVswitchType));
detailsMap.put(clusterId, detailsList);
updateClusterDetails(conn, detailsMap);
- logger.debug("Persist vSwitch Configuration: Successfully persisted vswitch configuration for cluster " + clusterId);
+ logger.debug("Persist vSwitch Configuration: Successfully persisted vswitch configuration for cluster {}", clusterId);
} else {
- logger.debug("Persist vSwitch Configuration: Ignoring cluster " + clusterId + " with hypervisor type " + clusterHypervisorType);
- continue;
+ logger.debug("Persist vSwitch Configuration: Ignoring cluster {} with hypervisor type {}", clusterId, clusterHypervisorType);
}
} // End cluster iteration
- }catch (SQLException e) {
- String msg = "Unable to persist vswitch configuration of VMware clusters." + e.getMessage();
+ } catch (SQLException e) {
+ String msg = UNABLE_TO_PERSIST_VSWITCH_CONFIGURATION_OF_VMWARE_CLUSTERS + e.getMessage();
logger.error(msg);
throw new CloudRuntimeException(msg, e);
}
@@ -209,10 +207,11 @@ private void persistVswitchConfiguration(Connection conn) {
setConfigurationParameter(conn, VSWITCH_GLOBAL_CONFIG_PARAM_CATEGORY, DVS_GLOBAL_CONFIG_PARAM_NAME, "true");
}
} catch (SQLException e) {
- String msg = "Unable to persist vswitch configuration of VMware clusters." + e.getMessage();
+ String msg = UNABLE_TO_PERSIST_VSWITCH_CONFIGURATION_OF_VMWARE_CLUSTERS + e.getMessage();
logger.error(msg);
throw new CloudRuntimeException(msg, e);
}
+
}
private void updateClusterDetails(Connection conn, Map<Long, List<Pair<String, String>>> detailsMap) {
@@ -227,7 +226,7 @@ private void updateClusterDetails(Connection conn, Map keyValuePair : keyValues) {
key = keyValuePair.first();
val = keyValuePair.second();
@@ -236,7 +235,7 @@ private void updateClusterDetails(Connection conn, Map keys = new ArrayList();
+ List<String> keys = new ArrayList<>();
keys.add("fk_external_dhcp_devices_nsp_id");
keys.add("fk_external_dhcp_devices_host_id");
keys.add("fk_external_dhcp_devices_pod_id");
@@ -397,15 +395,15 @@ private void fixBaremetalForeignKeys(Connection conn) {
keys.add("fk_external_pxe_devices_physical_network_id");
DbUpgradeUtils.dropKeysIfExist(conn, "baremetal_pxe_devices", keys, true);
- try (PreparedStatement alter_pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_nsp_id` FOREIGN KEY (`nsp_id`) REFERENCES `physical_network_service_providers` (`id`) ON DELETE CASCADE");)
+ try (PreparedStatement alter_pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_nsp_id` FOREIGN KEY (`nsp_id`) REFERENCES `physical_network_service_providers` (`id`) ON DELETE CASCADE"))
{
alter_pstmt.executeUpdate();
try(PreparedStatement alter_pstmt_id =
- conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE");
+ conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE")
) {
alter_pstmt_id.executeUpdate();
try(PreparedStatement alter_pstmt_phy_net =
- conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE");)
+ conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE"))
{
alter_pstmt_phy_net.executeUpdate();
}catch (SQLException e) {
@@ -419,14 +417,14 @@ private void fixBaremetalForeignKeys(Connection conn) {
throw new CloudRuntimeException("Unable to add foreign keys to baremetal_dhcp_devices table", e);
}
try (PreparedStatement alter_pxe_pstmt =
- conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_nsp_id` FOREIGN KEY (`nsp_id`) REFERENCES `physical_network_service_providers` (`id`) ON DELETE CASCADE");)
+ conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_nsp_id` FOREIGN KEY (`nsp_id`) REFERENCES `physical_network_service_providers` (`id`) ON DELETE CASCADE"))
{
alter_pxe_pstmt.executeUpdate();
try(PreparedStatement alter_pxe_id_pstmt =
- conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE");) {
+ conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE")) {
alter_pxe_id_pstmt.executeUpdate();
try(PreparedStatement alter_pxe_phy_net_pstmt =
- conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE");) {
+ conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE")) {
alter_pxe_phy_net_pstmt.executeUpdate();
}catch (SQLException e) {
throw new CloudRuntimeException("Unable to add foreign keys to baremetal_pxe_devices table", e);
@@ -442,13 +440,13 @@ private void fixBaremetalForeignKeys(Connection conn) {
private void addIndexForAlert(Connection conn) {
//First drop if it exists. (Due to patches shipped to customers some will have the index and some won't.)
- List<String> indexList = new ArrayList<String>();
+ List<String> indexList = new ArrayList<>();
logger.debug("Dropping index i_alert__last_sent if it exists");
indexList.add("last_sent"); // in 4.1, we created this index that is not in convention.
indexList.add("i_alert__last_sent");
DbUpgradeUtils.dropKeysIfExist(conn, "alert", indexList, false);
//Now add index.
- try(PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`alert` ADD INDEX `i_alert__last_sent`(`last_sent`)");)
+ try(PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`alert` ADD INDEX `i_alert__last_sent`(`last_sent`)"))
{
pstmt.executeUpdate();
logger.debug("Added index i_alert__last_sent for table alert");
@@ -457,76 +455,19 @@ private void addIndexForAlert(Connection conn) {
}
}
- private void dropUploadTable(Connection conn) {
- try(PreparedStatement pstmt0 = conn.prepareStatement("SELECT url, created, type_id, host_id from upload where type=?");) {
- // Read upload table - Templates
- logger.debug("Populating template_store_ref table");
- pstmt0.setString(1, "TEMPLATE");
- try(ResultSet rs0 = pstmt0.executeQuery();)
- {
- try(PreparedStatement pstmt1 = conn.prepareStatement("UPDATE template_store_ref SET download_url=?, download_url_created=? where template_id=? and store_id=?");) {
- //Update template_store_ref
- while (rs0.next()) {
- pstmt1.setString(1, rs0.getString("url"));
- pstmt1.setDate(2, rs0.getDate("created"));
- pstmt1.setLong(3, rs0.getLong("type_id"));
- pstmt1.setLong(4, rs0.getLong("host_id"));
- pstmt1.executeUpdate();
- }
- // Read upload table - Volumes
- logger.debug("Populating volume store ref table");
- try(PreparedStatement pstmt2 = conn.prepareStatement("SELECT url, created, type_id, host_id, install_path from upload where type=?");) {
- pstmt2.setString(1, "VOLUME");
- try(ResultSet rs2 = pstmt2.executeQuery();) {
-
- try(PreparedStatement pstmt3 =
- conn.prepareStatement("INSERT IGNORE INTO volume_store_ref (volume_id, store_id, zone_id, created, state, download_url, download_url_created, install_path) VALUES (?,?,?,?,?,?,?,?)");) {
- //insert into template_store_ref
- while (rs2.next()) {
- pstmt3.setLong(1, rs2.getLong("type_id"));
- pstmt3.setLong(2, rs2.getLong("host_id"));
- pstmt3.setLong(3, 1l);// ???
- pstmt3.setDate(4, rs2.getDate("created"));
- pstmt3.setString(5, "Ready");
- pstmt3.setString(6, rs2.getString("url"));
- pstmt3.setDate(7, rs2.getDate("created"));
- pstmt3.setString(8, rs2.getString("install_path"));
- pstmt3.executeUpdate();
- }
- }catch (SQLException e) {
- throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e);
- }
- }catch (SQLException e) {
- throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e);
- }
- }catch (SQLException e) {
- throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e);
- }
- }catch (SQLException e) {
- throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e);
- }
- }catch (SQLException e) {
- throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e);
- }
-
- } catch (SQLException e) {
- throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e);
- }
- }
-
//KVM snapshot flag: only turn on if Customers is using snapshot;
private void setKVMSnapshotFlag(Connection conn) {
logger.debug("Verify and set the KVM snapshot flag if snapshot was used. ");
- try(PreparedStatement pstmt = conn.prepareStatement("select count(*) from `cloud`.`snapshots` where hypervisor_type = 'KVM'");)
+ try(PreparedStatement pstmt = conn.prepareStatement("select count(*) from `cloud`.`snapshots` where hypervisor_type = 'KVM'"))
{
int numRows = 0;
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(ResultSet rs = pstmt.executeQuery()) {
if (rs.next()) {
numRows = rs.getInt(1);
}
if (numRows > 0) {
//Add the configuration flag
- try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = 'kvm.snapshot.enabled'");) {
+ try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = 'kvm.snapshot.enabled'")) {
update_pstmt.setString(1, "true");
update_pstmt.executeUpdate();
}catch (SQLException e) {
@@ -543,19 +484,19 @@ private void setKVMSnapshotFlag(Connection conn) {
}
private void updatePrimaryStore(Connection conn) {
- try(PreparedStatement sql = conn.prepareStatement("update storage_pool set storage_provider_name = ? , scope = ? where pool_type = 'Filesystem' or pool_type = 'LVM'");) {
+ try(PreparedStatement sql = conn.prepareStatement("update storage_pool set storage_provider_name = ? , scope = ? where pool_type = 'Filesystem' or pool_type = 'LVM'")) {
sql.setString(1, DataStoreProvider.DEFAULT_PRIMARY);
sql.setString(2, "HOST");
sql.executeUpdate();
- try(PreparedStatement sql2 = conn.prepareStatement("update storage_pool set storage_provider_name = ? , scope = ? where pool_type != 'Filesystem' and pool_type != 'LVM'");) {
+ try(PreparedStatement sql2 = conn.prepareStatement("update storage_pool set storage_provider_name = ? , scope = ? where pool_type != 'Filesystem' and pool_type != 'LVM'")) {
sql2.setString(1, DataStoreProvider.DEFAULT_PRIMARY);
sql2.setString(2, "CLUSTER");
sql2.executeUpdate();
}catch (SQLException e) {
- throw new CloudRuntimeException("Failed to upgrade vm template data store uuid: " + e.toString());
+ throw new CloudRuntimeException("Failed to upgrade vm template data store uuid: " + e, e);
}
} catch (SQLException e) {
- throw new CloudRuntimeException("Failed to upgrade vm template data store uuid: " + e.toString());
+ throw new CloudRuntimeException("Failed to upgrade vm template data store uuid: " + e, e);
}
}
@@ -565,20 +506,20 @@ private void updateOverCommitRatioClusterDetails(Connection conn) {
PreparedStatement pstmt = conn.prepareStatement("select id, hypervisor_type from `cloud`.`cluster` WHERE removed IS NULL");
PreparedStatement pstmt1 = conn.prepareStatement("INSERT INTO `cloud`.`cluster_details` (cluster_id, name, value) VALUES(?, 'cpuOvercommitRatio', ?)");
PreparedStatement pstmt2 = conn.prepareStatement("INSERT INTO `cloud`.`cluster_details` (cluster_id, name, value) VALUES(?, 'memoryOvercommitRatio', ?)");
- PreparedStatement pstmt3 = conn.prepareStatement("select value from `cloud`.`configuration` where name=?");) {
+ PreparedStatement pstmt3 = conn.prepareStatement("select value from `cloud`.`configuration` where name=?")) {
String global_cpu_overprovisioning_factor = "1";
String global_mem_overprovisioning_factor = "1";
pstmt3.setString(1, "cpu.overprovisioning.factor");
- try (ResultSet rscpu_global = pstmt3.executeQuery();) {
+ try (ResultSet rscpu_global = pstmt3.executeQuery()) {
if (rscpu_global.next())
global_cpu_overprovisioning_factor = rscpu_global.getString(1);
}
pstmt3.setString(1, "mem.overprovisioning.factor");
- try (ResultSet rsmem_global = pstmt3.executeQuery();) {
+ try (ResultSet rsmem_global = pstmt3.executeQuery()) {
if (rsmem_global.next())
global_mem_overprovisioning_factor = rsmem_global.getString(1);
}
- try (ResultSet rs1 = pstmt.executeQuery();) {
+ try (ResultSet rs1 = pstmt.executeQuery()) {
while (rs1.next()) {
long id = rs1.getLong(1);
String hypervisor_type = rs1.getString(2);
@@ -643,29 +584,34 @@ private void upgradeVmwareLabels(Connection conn) {
String trafficTypeVswitchParamValue;
try (PreparedStatement pstmt =
- conn.prepareStatement("select name,value from `cloud`.`configuration` where category='Hidden' and value is not NULL and name REGEXP 'vmware*.vswitch';");)
+ conn.prepareStatement("select name,value from `cloud`.`configuration` where category='Hidden' and value is not NULL and name REGEXP 'vmware*.vswitch';"))
{
// update the existing vmware traffic labels
- try(ResultSet rsParams = pstmt.executeQuery();) {
+ try(ResultSet rsParams = pstmt.executeQuery()) {
while (rsParams.next()) {
trafficTypeVswitchParam = rsParams.getString("name");
trafficTypeVswitchParamValue = rsParams.getString("value");
// When upgraded from 4.0 to 4.1 update physical network traffic label with trafficTypeVswitchParam
- if (trafficTypeVswitchParam.equals("vmware.private.vswitch")) {
- trafficType = "Management"; //TODO(sateesh): Ignore storage traffic, as required physical network already implemented, anything else tobe done?
- } else if (trafficTypeVswitchParam.equals("vmware.public.vswitch")) {
- trafficType = "Public";
- } else if (trafficTypeVswitchParam.equals("vmware.guest.vswitch")) {
- trafficType = "Guest";
+ switch (trafficTypeVswitchParam) {
+ case "vmware.private.vswitch":
+ trafficType = "Management"; //TODO(sateesh): Ignore storage traffic, as required physical network already implemented, anything else tobe done?
+
+ break;
+ case "vmware.public.vswitch":
+ trafficType = "Public";
+ break;
+ case "vmware.guest.vswitch":
+ trafficType = "Guest";
+ break;
}
try(PreparedStatement sel_pstmt =
- conn.prepareStatement("select physical_network_id, traffic_type, vmware_network_label from physical_network_traffic_types where vmware_network_label is not NULL and traffic_type=?;");) {
+ conn.prepareStatement("select physical_network_id, traffic_type, vmware_network_label from physical_network_traffic_types where vmware_network_label is not NULL and traffic_type=?;")) {
pstmt.setString(1, trafficType);
- try(ResultSet rsLabel = sel_pstmt.executeQuery();) {
+ try(ResultSet rsLabel = sel_pstmt.executeQuery()) {
newLabel = getNewLabel(rsLabel, trafficTypeVswitchParamValue);
try(PreparedStatement update_pstmt =
- conn.prepareStatement("update physical_network_traffic_types set vmware_network_label = ? where traffic_type = ? and vmware_network_label is not NULL;");) {
- logger.debug("Updating VMware label for " + trafficType + " traffic. Update SQL statement is " + pstmt);
+ conn.prepareStatement("update physical_network_traffic_types set vmware_network_label = ? where traffic_type = ? and vmware_network_label is not NULL;")) {
+ logger.debug("Updating VMware label for {} traffic. Update SQL statement is {}", trafficType, pstmt);
pstmt.setString(1, newLabel);
pstmt.setString(2, trafficType);
update_pstmt.executeUpdate();
@@ -688,17 +634,17 @@ private void upgradeVmwareLabels(Connection conn) {
}
private void persistLegacyZones(Connection conn) {
- List<Long> listOfLegacyZones = new ArrayList<Long>();
- List<Long> listOfNonLegacyZones = new ArrayList<Long>();
- Map<String, ArrayList<Long>> dcToZoneMap = new HashMap<String, ArrayList<Long>>();
+ List<Long> listOfLegacyZones = new ArrayList<>();
+ List<Long> listOfNonLegacyZones = new ArrayList<>();
+ Map<String, ArrayList<Long>> dcToZoneMap = new HashMap<>();
ResultSet clusters = null;
Long zoneId;
- Long clusterId;
+ long clusterId;
ArrayList<String> dcList = null;
String clusterHypervisorType;
boolean legacyZone;
boolean ignoreZone;
- Long count;
+ long count;
String dcOfPreviousCluster = null;
String dcOfCurrentCluster = null;
String[] tokens;
@@ -706,15 +652,15 @@ private void persistLegacyZones(Connection conn) {
String vc = "";
String dcName = "";
- try (PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`data_center` where removed is NULL");) {
- try (ResultSet rs = pstmt.executeQuery();) {
+ try (PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`data_center` where removed is NULL")) {
+ try (ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
zoneId = rs.getLong("id");
- try (PreparedStatement clustersQuery = conn.prepareStatement("select id, hypervisor_type from `cloud`.`cluster` where removed is NULL AND data_center_id=?");) {
+ try (PreparedStatement clustersQuery = conn.prepareStatement("select id, hypervisor_type from `cloud`.`cluster` where removed is NULL AND data_center_id=?")) {
clustersQuery.setLong(1, zoneId);
legacyZone = false;
ignoreZone = true;
- dcList = new ArrayList<String>();
+ dcList = new ArrayList<>();
count = 0L;
// Legacy zone term is meant only for VMware
// Legacy zone is a zone with at least 2 clusters & with multiple DCs or VCs
@@ -730,9 +676,9 @@ private void persistLegacyZones(Connection conn) {
if (clusterHypervisorType.equalsIgnoreCase("VMware")) {
ignoreZone = false;
try (PreparedStatement clusterDetailsQuery = conn
- .prepareStatement("select value from `cloud`.`cluster_details` where name='url' and cluster_id=?");) {
+ .prepareStatement("select value from `cloud`.`cluster_details` where name='url' and cluster_id=?")) {
clusterDetailsQuery.setLong(1, clusterId);
- try (ResultSet clusterDetails = clusterDetailsQuery.executeQuery();) {
+ try (ResultSet clusterDetails = clusterDetailsQuery.executeQuery()) {
clusterDetails.next();
url = clusterDetails.getString("value");
tokens = url.split("/"); // url format - http://vcenter/dc/cluster
@@ -746,7 +692,7 @@ private void persistLegacyZones(Connection conn) {
if (count > 0) {
if (!dcOfPreviousCluster.equalsIgnoreCase(dcOfCurrentCluster)) {
legacyZone = true;
- logger.debug("Marking the zone " + zoneId + " as legacy zone.");
+ logger.debug("Marking the zone {} as legacy zone.", zoneId);
}
}
} catch (SQLException e) {
@@ -756,7 +702,7 @@ private void persistLegacyZones(Connection conn) {
throw new CloudRuntimeException("Unable add zones to cloud.legacyzones table.", e);
}
} else {
- logger.debug("Ignoring zone " + zoneId + " with hypervisor type " + clusterHypervisorType);
+ logger.debug("Ignoring zone {} with hypervisor type {}", zoneId, clusterHypervisorType);
break;
}
count++;
@@ -774,7 +720,7 @@ private void persistLegacyZones(Connection conn) {
listOfNonLegacyZones.add(zoneId);
}
for (String dc : dcList) {
- ArrayList<Long> dcZones = new ArrayList<Long>();
+ ArrayList<Long> dcZones = new ArrayList<>();
if (dcToZoneMap.get(dc) != null) {
dcZones = dcToZoneMap.get(dc);
}
@@ -796,22 +742,22 @@ private void persistLegacyZones(Connection conn) {
updateLegacyZones(conn, listOfLegacyZones);
updateNonLegacyZones(conn, listOfNonLegacyZones);
} catch (SQLException e) {
- logger.error("Unable to discover legacy zones." + e.getMessage(),e);
+ logger.error("Unable to discover legacy zones.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to discover legacy zones." + e.getMessage(), e);
}
}catch (SQLException e) {
- logger.error("Unable to discover legacy zones." + e.getMessage(),e);
+ logger.error("Unable to discover legacy zones.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to discover legacy zones." + e.getMessage(), e);
}
}
private void updateLegacyZones(Connection conn, List<Long> zones) {
//Insert legacy zones into table for legacy zones.
- try (PreparedStatement legacyZonesQuery = conn.prepareStatement("INSERT INTO `cloud`.`legacy_zones` (zone_id) VALUES (?)");){
+ try (PreparedStatement legacyZonesQuery = conn.prepareStatement("INSERT INTO `cloud`.`legacy_zones` (zone_id) VALUES (?)")){
for (Long zoneId : zones) {
legacyZonesQuery.setLong(1, zoneId);
legacyZonesQuery.executeUpdate();
- logger.debug("Inserted zone " + zoneId + " into cloud.legacyzones table");
+ logger.debug("Inserted zone {} into cloud.legacyzones table", zoneId);
}
} catch (SQLException e) {
throw new CloudRuntimeException("Unable add zones to cloud.legacyzones table.", e);
@@ -821,22 +767,22 @@ private void updateLegacyZones(Connection conn, List zones) {
private void updateNonLegacyZones(Connection conn, List<Long> zones) {
try {
for (Long zoneId : zones) {
- logger.debug("Discovered non-legacy zone " + zoneId + ". Processing the zone to associate with VMware datacenter.");
+ logger.debug("Discovered non-legacy zone {}. Processing the zone to associate with VMware datacenter.", zoneId);
// All clusters in a non legacy zone will belong to the same VMware DC, hence pick the first cluster
- try (PreparedStatement clustersQuery = conn.prepareStatement("select id from `cloud`.`cluster` where removed is NULL AND data_center_id=?");) {
+ try (PreparedStatement clustersQuery = conn.prepareStatement("select id from `cloud`.`cluster` where removed is NULL AND data_center_id=?")) {
clustersQuery.setLong(1, zoneId);
- try (ResultSet clusters = clustersQuery.executeQuery();) {
+ try (ResultSet clusters = clustersQuery.executeQuery()) {
clusters.next();
- Long clusterId = clusters.getLong("id");
+ long clusterId = clusters.getLong("id");
// Get VMware datacenter details from cluster_details table
String user = null;
String password = null;
String url = null;
- try (PreparedStatement clusterDetailsQuery = conn.prepareStatement("select name, value from `cloud`.`cluster_details` where cluster_id=?");) {
+ try (PreparedStatement clusterDetailsQuery = conn.prepareStatement("select name, value from `cloud`.`cluster_details` where cluster_id=?")) {
clusterDetailsQuery.setLong(1, clusterId);
- try (ResultSet clusterDetails = clusterDetailsQuery.executeQuery();) {
+ try (ResultSet clusterDetails = clusterDetailsQuery.executeQuery()) {
while (clusterDetails.next()) {
String key = clusterDetails.getString(1);
String value = clusterDetails.getString(2);
@@ -854,7 +800,7 @@ private void updateNonLegacyZones(Connection conn, List zones) {
String guid = dcName + "@" + vc;
try (PreparedStatement insertVmWareDC = conn
- .prepareStatement("INSERT INTO `cloud`.`vmware_data_center` (uuid, name, guid, vcenter_host, username, password) values(?, ?, ?, ?, ?, ?)");) {
+ .prepareStatement("INSERT INTO `cloud`.`vmware_data_center` (uuid, name, guid, vcenter_host, username, password) values(?, ?, ?, ?, ?, ?)")) {
insertVmWareDC.setString(1, UUID.randomUUID().toString());
insertVmWareDC.setString(2, dcName);
insertVmWareDC.setString(3, guid);
@@ -863,16 +809,16 @@ private void updateNonLegacyZones(Connection conn, List zones) {
insertVmWareDC.setString(6, password);
insertVmWareDC.executeUpdate();
}
- try (PreparedStatement selectVmWareDC = conn.prepareStatement("SELECT id FROM `cloud`.`vmware_data_center` where guid=?");) {
+ try (PreparedStatement selectVmWareDC = conn.prepareStatement("SELECT id FROM `cloud`.`vmware_data_center` where guid=?")) {
selectVmWareDC.setString(1, guid);
- try (ResultSet vmWareDcInfo = selectVmWareDC.executeQuery();) {
- Long vmwareDcId = -1L;
+ try (ResultSet vmWareDcInfo = selectVmWareDC.executeQuery()) {
+ long vmwareDcId = -1L;
if (vmWareDcInfo.next()) {
vmwareDcId = vmWareDcInfo.getLong("id");
}
try (PreparedStatement insertMapping = conn
- .prepareStatement("INSERT INTO `cloud`.`vmware_data_center_zone_map` (zone_id, vmware_data_center_id) values(?, ?)");) {
+ .prepareStatement("INSERT INTO `cloud`.`vmware_data_center_zone_map` (zone_id, vmware_data_center_id) values(?, ?)")) {
insertMapping.setLong(1, zoneId);
insertMapping.setLong(2, vmwareDcId);
insertMapping.executeUpdate();
@@ -893,17 +839,17 @@ private void updateNonLegacyZones(Connection conn, List zones) {
private void createPlaceHolderNics(Connection conn) {
try (PreparedStatement pstmt =
- conn.prepareStatement("SELECT network_id, gateway, ip4_address FROM `cloud`.`nics` WHERE reserver_name IN ('DirectNetworkGuru','DirectPodBasedNetworkGuru') and vm_type='DomainRouter' AND removed IS null");)
+ conn.prepareStatement("SELECT network_id, gateway, ip4_address FROM `cloud`.`nics` WHERE reserver_name IN ('DirectNetworkGuru','DirectPodBasedNetworkGuru') and vm_type='DomainRouter' AND removed IS null"))
{
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
- Long networkId = rs.getLong(1);
+ long networkId = rs.getLong(1);
String gateway = rs.getString(2);
String ip = rs.getString(3);
String uuid = UUID.randomUUID().toString();
//Insert placeholder nic for each Domain router nic in Shared network
try(PreparedStatement insert_pstmt =
- conn.prepareStatement("INSERT INTO `cloud`.`nics` (uuid, ip4_address, gateway, network_id, state, strategy, vm_type, default_nic, created) VALUES (?, ?, ?, ?, 'Reserved', 'PlaceHolder', 'DomainRouter', 0, now())");) {
+ conn.prepareStatement("INSERT INTO `cloud`.`nics` (uuid, ip4_address, gateway, network_id, state, strategy, vm_type, default_nic, created) VALUES (?, ?, ?, ?, 'Reserved', 'PlaceHolder', 'DomainRouter', 0, now())")) {
insert_pstmt.setString(1, uuid);
insert_pstmt.setString(2, ip);
insert_pstmt.setString(3, gateway);
@@ -912,7 +858,7 @@ private void createPlaceHolderNics(Connection conn) {
}catch (SQLException e) {
throw new CloudRuntimeException("Unable to create placeholder nics", e);
}
- logger.debug("Created placeholder nic for the ipAddress " + ip + " and network " + networkId);
+ logger.debug("Created placeholder nic for the ipAddress {} and network {}", ip, networkId);
}
}catch (SQLException e) {
throw new CloudRuntimeException("Unable to create placeholder nics", e);
@@ -923,13 +869,13 @@ private void createPlaceHolderNics(Connection conn) {
}
private void updateRemoteAccessVpn(Connection conn) {
- try(PreparedStatement pstmt = conn.prepareStatement("SELECT vpn_server_addr_id FROM `cloud`.`remote_access_vpn`");) {
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(PreparedStatement pstmt = conn.prepareStatement("SELECT vpn_server_addr_id FROM `cloud`.`remote_access_vpn`")) {
+ try(ResultSet rs = pstmt.executeQuery()) {
long id = 1;
while (rs.next()) {
String uuid = UUID.randomUUID().toString();
- Long ipId = rs.getLong(1);
- try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`remote_access_vpn` set uuid=?, id=? where vpn_server_addr_id=?");) {
+ long ipId = rs.getLong(1);
+ try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`remote_access_vpn` set uuid=?, id=? where vpn_server_addr_id=?")) {
update_pstmt.setString(1, uuid);
update_pstmt.setLong(2, id);
update_pstmt.setLong(3, ipId);
@@ -949,44 +895,44 @@ private void updateRemoteAccessVpn(Connection conn) {
private void addEgressFwRulesForSRXGuestNw(Connection conn) {
ResultSet rs = null;
- try(PreparedStatement pstmt = conn.prepareStatement("select network_id FROM `cloud`.`ntwk_service_map` where service='Firewall' and provider='JuniperSRX' ");) {
+ try(PreparedStatement pstmt = conn.prepareStatement("select network_id FROM `cloud`.`ntwk_service_map` where service='Firewall' and provider='JuniperSRX' ")) {
rs = pstmt.executeQuery();
while (rs.next()) {
long netId = rs.getLong(1);
//checking for Isolated OR Virtual
try(PreparedStatement sel_net_pstmt =
- conn.prepareStatement("select account_id, domain_id FROM `cloud`.`networks` where (guest_type='Isolated' OR guest_type='Virtual') and traffic_type='Guest' and vpc_id is NULL and (state='implemented' OR state='Shutdown') and id=? ");) {
+ conn.prepareStatement("select account_id, domain_id FROM `cloud`.`networks` where (guest_type='Isolated' OR guest_type='Virtual') and traffic_type='Guest' and vpc_id is NULL and (state='implemented' OR state='Shutdown') and id=? ")) {
sel_net_pstmt.setLong(1, netId);
logger.debug("Getting account_id, domain_id from networks table: ");
- try(ResultSet rsNw = pstmt.executeQuery();)
+ try(ResultSet rsNw = pstmt.executeQuery())
{
if (rsNw.next()) {
long accountId = rsNw.getLong(1);
long domainId = rsNw.getLong(2);
//Add new rule for the existing networks
- logger.debug("Adding default egress firewall rule for network " + netId);
+ logger.debug("Adding default egress firewall rule for network {}", netId);
try (PreparedStatement insert_pstmt =
- conn.prepareStatement("INSERT INTO firewall_rules (uuid, state, protocol, purpose, account_id, domain_id, network_id, xid, created, traffic_type) VALUES (?, 'Active', 'all', 'Firewall', ?, ?, ?, ?, now(), 'Egress')");) {
+ conn.prepareStatement("INSERT INTO firewall_rules (uuid, state, protocol, purpose, account_id, domain_id, network_id, xid, created, traffic_type) VALUES (?, 'Active', 'all', 'Firewall', ?, ?, ?, ?, now(), 'Egress')")) {
insert_pstmt.setString(1, UUID.randomUUID().toString());
insert_pstmt.setLong(2, accountId);
insert_pstmt.setLong(3, domainId);
insert_pstmt.setLong(4, netId);
insert_pstmt.setString(5, UUID.randomUUID().toString());
- logger.debug("Inserting default egress firewall rule " + insert_pstmt);
+ logger.debug("Inserting default egress firewall rule {}", insert_pstmt);
insert_pstmt.executeUpdate();
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to set egress firewall rules ", e);
}
- try (PreparedStatement sel_firewall_pstmt = conn.prepareStatement("select id from firewall_rules where protocol='all' and network_id=?");) {
+ try (PreparedStatement sel_firewall_pstmt = conn.prepareStatement("select id from firewall_rules where protocol='all' and network_id=?")) {
sel_firewall_pstmt.setLong(1, netId);
- try (ResultSet rsId = sel_firewall_pstmt.executeQuery();) {
+ try (ResultSet rsId = sel_firewall_pstmt.executeQuery()) {
long firewallRuleId;
if (rsId.next()) {
firewallRuleId = rsId.getLong(1);
- try (PreparedStatement insert_pstmt = conn.prepareStatement("insert into firewall_rules_cidrs (firewall_rule_id,source_cidr) values (?, '0.0.0.0/0')");) {
+ try (PreparedStatement insert_pstmt = conn.prepareStatement("insert into firewall_rules_cidrs (firewall_rule_id,source_cidr) values (?, '0.0.0.0/0')")) {
insert_pstmt.setLong(1, firewallRuleId);
- logger.debug("Inserting rule for cidr 0.0.0.0/0 for the new Firewall rule id=" + firewallRuleId + " with statement " + insert_pstmt);
+ logger.debug("Inserting rule for cidr 0.0.0.0/0 for the new Firewall rule id={} with statement {}", firewallRuleId, insert_pstmt);
insert_pstmt.executeUpdate();
} catch (SQLException e) {
throw new CloudRuntimeException("Unable to set egress firewall rules ", e);
@@ -1008,15 +954,15 @@ private void addEgressFwRulesForSRXGuestNw(Connection conn) {
}
private void upgradeEIPNetworkOfferings(Connection conn) {
- try (PreparedStatement pstmt = conn.prepareStatement("select id, elastic_ip_service from `cloud`.`network_offerings` where traffic_type='Guest'");)
+ try (PreparedStatement pstmt = conn.prepareStatement("select id, elastic_ip_service from `cloud`.`network_offerings` where traffic_type='Guest'"))
{
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
long id = rs.getLong(1);
// check if elastic IP service is enabled for network offering
if (rs.getLong(2) != 0) {
//update network offering with eip_associate_public_ip set to true
- try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`network_offerings` set eip_associate_public_ip=? where id=?");) {
+ try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`network_offerings` set eip_associate_public_ip=? where id=?")) {
update_pstmt.setBoolean(1, true);
update_pstmt.setLong(2, id);
update_pstmt.executeUpdate();
@@ -1060,24 +1006,24 @@ private void updateNetworkACLs(Connection conn) {
PreparedStatement pstmtSelectFirewallCidrs = conn.prepareStatement(sqlSelectFirewallCidrs);
PreparedStatement pstmtDeleteFirewallCidr = conn.prepareStatement(sqlDeleteFirewallCidr);
PreparedStatement pstmtDeleteFirewallRules = conn.prepareStatement(sqlDeleteFirewallRules);
- ResultSet rsNetworkIds = pstmtSelectNetworkIds.executeQuery();) {
+ ResultSet rsNetworkIds = pstmtSelectNetworkIds.executeQuery()) {
//Get all VPC tiers
while (rsNetworkIds.next()) {
- Long networkId = rsNetworkIds.getLong(1);
- logger.debug("Updating network ACLs for network: " + networkId);
- Long vpcId = rsNetworkIds.getLong(2);
+ long networkId = rsNetworkIds.getLong(1);
+ logger.debug("Updating network ACLs for network: {}", networkId);
+ long vpcId = rsNetworkIds.getLong(2);
String tierUuid = rsNetworkIds.getString(3);
pstmtSelectFirewallRules.setLong(1, networkId);
boolean hasAcls = false;
Long aclId = null;
int number = 1;
- try (ResultSet rsAcls = pstmtSelectFirewallRules.executeQuery();) {
+ try (ResultSet rsAcls = pstmtSelectFirewallRules.executeQuery()) {
while (rsAcls.next()) {
if (!hasAcls) {
hasAcls = true;
aclId = nextAclId++;
//create ACL for the tier
- logger.debug("Creating network ACL for tier: " + tierUuid);
+ logger.debug("Creating network ACL for tier: {}", tierUuid);
pstmtInsertNetworkAcl.setLong(1, aclId);
pstmtInsertNetworkAcl.setLong(2, vpcId);
pstmtInsertNetworkAcl.setString(3, "ACL for tier " + tierUuid);
@@ -1085,13 +1031,13 @@ private void updateNetworkACLs(Connection conn) {
pstmtInsertNetworkAcl.executeUpdate();
}
- Long fwRuleId = rsAcls.getLong(1);
+ long fwRuleId = rsAcls.getLong(1);
String cidr = null;
//get cidr from firewall_rules_cidrs
pstmtSelectFirewallCidrs.setLong(1, fwRuleId);
- try (ResultSet rsCidr = pstmtSelectFirewallCidrs.executeQuery();) {
+ try (ResultSet rsCidr = pstmtSelectFirewallCidrs.executeQuery()) {
while (rsCidr.next()) {
- Long cidrId = rsCidr.getLong(1);
+ long cidrId = rsCidr.getLong(1);
String sourceCidr = rsCidr.getString(2);
if (cidr == null) {
cidr = sourceCidr;
@@ -1105,20 +1051,20 @@ private void updateNetworkACLs(Connection conn) {
}
String aclItemUuid = rsAcls.getString(2);
//Move acl to network_acl_item table
- logger.debug("Moving firewall rule: " + aclItemUuid);
+ logger.debug("Moving firewall rule: {}", aclItemUuid);
//uuid
pstmtInsertNetworkAclItem.setString(1, aclItemUuid);
//aclId
pstmtInsertNetworkAclItem.setLong(2, aclId);
//Start port
- Integer startPort = rsAcls.getInt(3);
+ int startPort = rsAcls.getInt(3);
if (rsAcls.wasNull()) {
pstmtInsertNetworkAclItem.setNull(3, Types.INTEGER);
} else {
pstmtInsertNetworkAclItem.setLong(3, startPort);
}
//End port
- Integer endPort = rsAcls.getInt(4);
+ int endPort = rsAcls.getInt(4);
if (rsAcls.wasNull()) {
pstmtInsertNetworkAclItem.setNull(4, Types.INTEGER);
} else {
@@ -1131,7 +1077,7 @@ private void updateNetworkACLs(Connection conn) {
String protocol = rsAcls.getString(6);
pstmtInsertNetworkAclItem.setString(6, protocol);
//icmp_code
- Integer icmpCode = rsAcls.getInt(7);
+ int icmpCode = rsAcls.getInt(7);
if (rsAcls.wasNull()) {
pstmtInsertNetworkAclItem.setNull(7, Types.INTEGER);
} else {
@@ -1139,7 +1085,7 @@ private void updateNetworkACLs(Connection conn) {
}
//icmp_type
- Integer icmpType = rsAcls.getInt(8);
+ int icmpType = rsAcls.getInt(8);
if (rsAcls.wasNull()) {
pstmtInsertNetworkAclItem.setNull(8, Types.INTEGER);
} else {
@@ -1183,8 +1129,8 @@ private void updateNetworkACLs(Connection conn) {
}
private void updateGlobalDeploymentPlanner(Connection conn) {
- try (PreparedStatement pstmt = conn.prepareStatement("select value from `cloud`.`configuration` where name = 'vm.allocation.algorithm'");){
- try(ResultSet rs = pstmt.executeQuery();)
+ try (PreparedStatement pstmt = conn.prepareStatement("select value from `cloud`.`configuration` where name = 'vm.allocation.algorithm'")){
+ try(ResultSet rs = pstmt.executeQuery())
{
while (rs.next()) {
String globalValue = rs.getString(1);
@@ -1195,16 +1141,12 @@ private void updateGlobalDeploymentPlanner(Connection conn) {
plannerName = "FirstFitPlanner";
} else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.firstfit.toString())) {
plannerName = "FirstFitPlanner";
- } else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userconcentratedpod_firstfit.toString())) {
- plannerName = "UserConcentratedPodPlanner";
- } else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userconcentratedpod_random.toString())) {
- plannerName = "UserConcentratedPodPlanner";
} else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userdispersing.toString())) {
plannerName = "UserDispersingPlanner";
}
}
// update vm.deployment.planner global config
- try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` set value=? where name = 'vm.deployment.planner'");) {
+ try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` set value=? where name = 'vm.deployment.planner'")) {
update_pstmt.setString(1, plannerName);
update_pstmt.executeUpdate();
} catch (SQLException e) {
@@ -1221,13 +1163,13 @@ private void updateGlobalDeploymentPlanner(Connection conn) {
private void upgradeDefaultVpcOffering(Connection conn) {
try(PreparedStatement pstmt =
- conn.prepareStatement("select distinct map.vpc_offering_id from `cloud`.`vpc_offering_service_map` map, `cloud`.`vpc_offerings` off where off.id=map.vpc_offering_id AND service='Lb'");)
+ conn.prepareStatement("select distinct map.vpc_offering_id from `cloud`.`vpc_offering_service_map` map, `cloud`.`vpc_offerings` off where off.id=map.vpc_offering_id AND service='Lb'"))
{
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
long id = rs.getLong(1);
//Add internal LB vm as a supported provider for the load balancer service
- try(PreparedStatement insert_pstmt = conn.prepareStatement("INSERT INTO `cloud`.`vpc_offering_service_map` (vpc_offering_id, service, provider) VALUES (?,?,?)");) {
+ try(PreparedStatement insert_pstmt = conn.prepareStatement("INSERT INTO `cloud`.`vpc_offering_service_map` (vpc_offering_id, service, provider) VALUES (?,?,?)")) {
insert_pstmt.setLong(1, id);
insert_pstmt.setString(2, "Lb");
insert_pstmt.setString(3, "InternalLbVm");
@@ -1245,27 +1187,27 @@ private void upgradeDefaultVpcOffering(Connection conn) {
}
private void upgradePhysicalNtwksWithInternalLbProvider(Connection conn) {
- try (PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM `cloud`.`physical_network` where removed is null");){
- try(ResultSet rs = pstmt.executeQuery();) {
+ try (PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM `cloud`.`physical_network` where removed is null")){
+ try(ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
long pNtwkId = rs.getLong(1);
String uuid = UUID.randomUUID().toString();
//Add internal LB VM to the list of physical network service providers
try(PreparedStatement insert_pstmt = conn.prepareStatement("INSERT INTO `cloud`.`physical_network_service_providers` "
+ "(uuid, physical_network_id, provider_name, state, load_balance_service_provided, destination_physical_network_id)"
- + " VALUES (?, ?, 'InternalLbVm', 'Enabled', 1, 0)");) {
+ + " VALUES (?, ?, 'InternalLbVm', 'Enabled', 1, 0)")) {
insert_pstmt.setString(1, uuid);
insert_pstmt.setLong(2, pNtwkId);
insert_pstmt.executeUpdate();
//Add internal lb vm to the list of physical network elements
try (PreparedStatement pstmt1 =
- conn.prepareStatement("SELECT id FROM `cloud`.`physical_network_service_providers`" + " WHERE physical_network_id=? AND provider_name='InternalLbVm'");) {
+ conn.prepareStatement("SELECT id FROM `cloud`.`physical_network_service_providers`" + " WHERE physical_network_id=? AND provider_name='InternalLbVm'")) {
pstmt1.setLong(1, pNtwkId);
- try (ResultSet rs1 = pstmt1.executeQuery();) {
+ try (ResultSet rs1 = pstmt1.executeQuery()) {
while (rs1.next()) {
long providerId = rs1.getLong(1);
uuid = UUID.randomUUID().toString();
- try(PreparedStatement insert_cloud_pstmt = conn.prepareStatement("INSERT INTO `cloud`.`virtual_router_providers` (nsp_id, uuid, type, enabled) VALUES (?, ?, 'InternalLbVm', 1)");) {
+ try(PreparedStatement insert_cloud_pstmt = conn.prepareStatement("INSERT INTO `cloud`.`virtual_router_providers` (nsp_id, uuid, type, enabled) VALUES (?, ?, 'InternalLbVm', 1)")) {
insert_cloud_pstmt.setLong(1, providerId);
insert_cloud_pstmt.setString(2, uuid);
insert_cloud_pstmt.executeUpdate();
@@ -1291,14 +1233,14 @@ private void upgradePhysicalNtwksWithInternalLbProvider(Connection conn) {
private void addHostDetailsIndex(Connection conn) {
logger.debug("Checking if host_details index exists, if not we will add it");
- try(PreparedStatement pstmt = conn.prepareStatement("SHOW INDEX FROM `cloud`.`host_details` where KEY_NAME = 'fk_host_details__host_id'");)
+ try(PreparedStatement pstmt = conn.prepareStatement("SHOW INDEX FROM `cloud`.`host_details` where KEY_NAME = 'fk_host_details__host_id'"))
{
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(ResultSet rs = pstmt.executeQuery()) {
if (rs.next()) {
logger.debug("Index already exists on host_details - not adding new one");
} else {
// add the index
- try(PreparedStatement pstmtUpdate = conn.prepareStatement("ALTER TABLE `cloud`.`host_details` ADD INDEX `fk_host_details__host_id` (`host_id`)");) {
+ try(PreparedStatement pstmtUpdate = conn.prepareStatement("ALTER TABLE `cloud`.`host_details` ADD INDEX `fk_host_details__host_id` (`host_id`)")) {
pstmtUpdate.executeUpdate();
logger.debug("Index did not exist on host_details - added new one");
}catch (SQLException e) {
@@ -1314,15 +1256,15 @@ private void addHostDetailsIndex(Connection conn) {
}
private void updateNetworksForPrivateGateways(Connection conn) {
- try(PreparedStatement pstmt = conn.prepareStatement("SELECT network_id, vpc_id FROM `cloud`.`vpc_gateways` WHERE type='Private' AND removed IS null");)
+ try(PreparedStatement pstmt = conn.prepareStatement("SELECT network_id, vpc_id FROM `cloud`.`vpc_gateways` WHERE type='Private' AND removed IS null"))
{
//1) get all non removed gateways
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
- Long networkId = rs.getLong(1);
- Long vpcId = rs.getLong(2);
+ long networkId = rs.getLong(1);
+ long vpcId = rs.getLong(2);
//2) Update networks with vpc_id if its set to NULL
- try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`networks` set vpc_id=? where id=? and vpc_id is NULL and removed is NULL");) {
+ try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`networks` set vpc_id=? where id=? and vpc_id is NULL and removed is NULL")) {
update_pstmt.setLong(1, vpcId);
update_pstmt.setLong(2, networkId);
update_pstmt.executeUpdate();
@@ -1339,13 +1281,13 @@ private void updateNetworksForPrivateGateways(Connection conn) {
}
private void removeFirewallServiceFromSharedNetworkOfferingWithSGService(Connection conn) {
- try(PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`network_offerings` where unique_name='DefaultSharedNetworkOfferingWithSGService'");)
+ try(PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`network_offerings` where unique_name='DefaultSharedNetworkOfferingWithSGService'"))
{
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
long id = rs.getLong(1);
// remove Firewall service for SG shared network offering
- try(PreparedStatement del_pstmt = conn.prepareStatement("DELETE from `cloud`.`ntwk_offering_service_map` where network_offering_id=? and service='Firewall'");) {
+ try(PreparedStatement del_pstmt = conn.prepareStatement("DELETE from `cloud`.`ntwk_offering_service_map` where network_offering_id=? and service='Firewall'")) {
del_pstmt.setLong(1, id);
del_pstmt.executeUpdate();
}catch (SQLException e) {
@@ -1362,9 +1304,9 @@ private void removeFirewallServiceFromSharedNetworkOfferingWithSGService(Connect
private void fix22xKVMSnapshots(Connection conn) {
logger.debug("Updating KVM snapshots");
- try (PreparedStatement pstmt = conn.prepareStatement("select id, backup_snap_id from `cloud`.`snapshots` where hypervisor_type='KVM' and removed is null and backup_snap_id is not null");)
+ try (PreparedStatement pstmt = conn.prepareStatement("select id, backup_snap_id from `cloud`.`snapshots` where hypervisor_type='KVM' and removed is null and backup_snap_id is not null"))
{
- try(ResultSet rs = pstmt.executeQuery();) {
+ try(ResultSet rs = pstmt.executeQuery()) {
while (rs.next()) {
long id = rs.getLong(1);
String backUpPath = rs.getString(2);
@@ -1374,8 +1316,8 @@ private void fix22xKVMSnapshots(Connection conn) {
int index = backUpPath.indexOf("snapshots" + File.separator);
if (index > 1) {
String correctedPath = backUpPath.substring(index);
- logger.debug("Updating Snapshot with id: " + id + " original backup path: " + backUpPath + " updated backup path: " + correctedPath);
- try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`snapshots` set backup_snap_id=? where id = ?");) {
+ logger.debug("Updating Snapshot with id: {} original backup path: {} updated backup path: {}", id, backUpPath, correctedPath);
+ try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`snapshots` set backup_snap_id=? where id = ?")) {
update_pstmt.setString(1, correctedPath);
update_pstmt.setLong(2, id);
update_pstmt.executeUpdate();
@@ -1401,8 +1343,8 @@ private void correctExternalNetworkDevicesSetup(Connection conn) {
try (
PreparedStatement zoneSearchStmt = conn.prepareStatement("SELECT id, networktype FROM `cloud`.`data_center`");
- ResultSet zoneResults = zoneSearchStmt.executeQuery();
- ){
+ ResultSet zoneResults = zoneSearchStmt.executeQuery()
+ ){
while (zoneResults.next()) {
long zoneId = zoneResults.getLong(1);
String networkType = zoneResults.getString(2);
@@ -1433,7 +1375,6 @@ private void correctExternalNetworkDevicesSetup(Connection conn) {
// balancers added in the zone
while (f5DevicesResult.next()) {
long f5HostId = f5DevicesResult.getLong(1);
- ;
addF5ServiceProvider(conn, physicalNetworkId, zoneId);
addF5LoadBalancer(conn, f5HostId, physicalNetworkId);
}
@@ -1443,7 +1384,7 @@ private void correctExternalNetworkDevicesSetup(Connection conn) {
try (PreparedStatement fetchSRXNspStmt =
conn.prepareStatement("SELECT id from `cloud`.`physical_network_service_providers` where physical_network_id=" + physicalNetworkId +
" and provider_name = 'JuniperSRX'");
- ResultSet rsSRXNSP = fetchSRXNspStmt.executeQuery();) {
+ ResultSet rsSRXNSP = fetchSRXNspStmt.executeQuery()) {
hasSrxNsp = rsSRXNSP.next();
}
@@ -1477,8 +1418,8 @@ private void addF5LoadBalancer(Connection conn, long hostId, long physicalNetwor
String insertF5 =
"INSERT INTO `cloud`.`external_load_balancer_devices` (physical_network_id, host_id, provider_name, "
+ "device_name, capacity, is_dedicated, device_state, allocation_state, is_managed, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";
- try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertF5);) {
- logger.debug("Adding F5 Big IP load balancer with host id " + hostId + " in to physical network" + physicalNetworkId);
+ try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertF5)) {
+ logger.debug("Adding F5 Big IP load balancer with host id {} into physical network {}", hostId, physicalNetworkId);
pstmtUpdate.setLong(1, physicalNetworkId);
pstmtUpdate.setLong(2, hostId);
pstmtUpdate.setString(3, "F5BigIp");
@@ -1499,8 +1440,8 @@ private void addSrxFirewall(Connection conn, long hostId, long physicalNetworkId
String insertSrx =
"INSERT INTO `cloud`.`external_firewall_devices` (physical_network_id, host_id, provider_name, "
+ "device_name, capacity, is_dedicated, device_state, allocation_state, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?)";
- try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertSrx);) {
- logger.debug("Adding SRX firewall device with host id " + hostId + " in to physical network" + physicalNetworkId);
+ try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertSrx)) {
+ logger.debug("Adding SRX firewall device with host id {} into physical network {}", hostId, physicalNetworkId);
pstmtUpdate.setLong(1, physicalNetworkId);
pstmtUpdate.setLong(2, hostId);
pstmtUpdate.setString(3, "JuniperSRX");
@@ -1522,9 +1463,9 @@ private void addF5ServiceProvider(Connection conn, long physicalNetworkId, long
+ "`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`,"
+ "`firewall_service_provided`, `source_nat_service_provided`, `load_balance_service_provided`, `static_nat_service_provided`,"
+ "`port_forwarding_service_provided`, `user_data_service_provided`, `security_group_service_provided`) VALUES (?,?,?,?,0,0,0,0,0,0,0,1,0,0,0,0)";
- try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertPNSP);) {
+ try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertPNSP)) {
// add physical network service provider - F5BigIp
- logger.debug("Adding PhysicalNetworkServiceProvider F5BigIp" + " in to physical network" + physicalNetworkId);
+ logger.debug("Adding PhysicalNetworkServiceProvider F5BigIp into physical network {}", physicalNetworkId);
pstmtUpdate.setString(1, UUID.randomUUID().toString());
pstmtUpdate.setLong(2, physicalNetworkId);
pstmtUpdate.setString(3, "F5BigIp");
@@ -1541,7 +1482,7 @@ private void addSrxServiceProvider(Connection conn, long physicalNetworkId, long
+ "`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`,"
+ "`firewall_service_provided`, `source_nat_service_provided`, `load_balance_service_provided`, `static_nat_service_provided`,"
+ "`port_forwarding_service_provided`, `user_data_service_provided`, `security_group_service_provided`) VALUES (?,?,?,?,0,0,0,0,1,1,1,0,1,1,0,0)";
- try( PreparedStatement pstmtUpdate = conn.prepareStatement(insertPNSP);) {
+ try( PreparedStatement pstmtUpdate = conn.prepareStatement(insertPNSP)) {
// add physical network service provider - JuniperSRX
logger.debug("Adding PhysicalNetworkServiceProvider JuniperSRX");
pstmtUpdate.setString(1, UUID.randomUUID().toString());
@@ -1563,16 +1504,15 @@ private void addSrxServiceProvider(Connection conn, long physicalNetworkId, long
// they are made in lowercase. On upgrade change the host details name to lower case
private void fixZoneUsingExternalDevices(Connection conn) {
//Get zones to upgrade
- List zoneIds = new ArrayList();
- ResultSet rs = null;
+ List zoneIds = new ArrayList<>();
long networkOfferingId, networkId;
long f5DeviceId, f5HostId;
long srxDevivceId, srxHostId;
try(PreparedStatement sel_id_pstmt =
- conn.prepareStatement("select id from `cloud`.`data_center` where lb_provider='F5BigIp' or firewall_provider='JuniperSRX' or gateway_provider='JuniperSRX'");)
+ conn.prepareStatement("select id from `cloud`.`data_center` where lb_provider='F5BigIp' or firewall_provider='JuniperSRX' or gateway_provider='JuniperSRX'"))
{
- try(ResultSet sel_id_rs = sel_id_pstmt.executeQuery();) {
+ try(ResultSet sel_id_rs = sel_id_pstmt.executeQuery()) {
while (sel_id_rs.next()) {
zoneIds.add(sel_id_rs.getLong(1));
}
@@ -1583,14 +1523,14 @@ private void fixZoneUsingExternalDevices(Connection conn) {
throw new CloudRuntimeException("fixZoneUsingExternalDevices:Exception:"+e.getMessage(), e);
}
- if (zoneIds.size() == 0) {
+ if (zoneIds.isEmpty()) {
return; // no zones using F5 and SRX devices so return
}
// find the default network offering created for external devices during upgrade from 2.2.14
- try(PreparedStatement sel_id_off_pstmt = conn.prepareStatement("select id from `cloud`.`network_offerings` where unique_name='Isolated with external providers' ");)
+ try(PreparedStatement sel_id_off_pstmt = conn.prepareStatement("select id from `cloud`.`network_offerings` where unique_name='Isolated with external providers' "))
{
- try(ResultSet sel_id_off_rs = sel_id_off_pstmt.executeQuery();) {
+ try(ResultSet sel_id_off_rs = sel_id_off_pstmt.executeQuery()) {
if (sel_id_off_rs.first()) {
networkOfferingId = sel_id_off_rs.getLong(1);
} else {
@@ -1605,9 +1545,9 @@ private void fixZoneUsingExternalDevices(Connection conn) {
for (Long zoneId : zoneIds) {
try {
// find the F5 device id in the zone
- try(PreparedStatement sel_id_host_pstmt = conn.prepareStatement("SELECT id FROM host WHERE data_center_id=? AND type = 'ExternalLoadBalancer' AND removed IS NULL");) {
+ try(PreparedStatement sel_id_host_pstmt = conn.prepareStatement("SELECT id FROM host WHERE data_center_id=? AND type = 'ExternalLoadBalancer' AND removed IS NULL")) {
sel_id_host_pstmt.setLong(1, zoneId);
- try(ResultSet sel_id_host_pstmt_rs = sel_id_host_pstmt.executeQuery();) {
+ try(ResultSet sel_id_host_pstmt_rs = sel_id_host_pstmt.executeQuery()) {
if (sel_id_host_pstmt_rs.first()) {
f5HostId = sel_id_host_pstmt_rs.getLong(1);
} else {
@@ -1619,9 +1559,9 @@ private void fixZoneUsingExternalDevices(Connection conn) {
}catch (SQLException e) {
throw new CloudRuntimeException("fixZoneUsingExternalDevices:Exception:"+e.getMessage(), e);
}
- try(PreparedStatement sel_id_ext_pstmt = conn.prepareStatement("SELECT id FROM external_load_balancer_devices WHERE host_id=?");) {
+ try(PreparedStatement sel_id_ext_pstmt = conn.prepareStatement("SELECT id FROM external_load_balancer_devices WHERE host_id=?")) {
sel_id_ext_pstmt.setLong(1, f5HostId);
- try(ResultSet sel_id_ext_rs = sel_id_ext_pstmt.executeQuery();) {
+ try(ResultSet sel_id_ext_rs = sel_id_ext_pstmt.executeQuery()) {
if (sel_id_ext_rs.first()) {
f5DeviceId = sel_id_ext_rs.getLong(1);
} else {
@@ -1636,9 +1576,9 @@ private void fixZoneUsingExternalDevices(Connection conn) {
}
// find the SRX device id in the zone
- try(PreparedStatement sel_id_hostdc_pstmt = conn.prepareStatement("SELECT id FROM host WHERE data_center_id=? AND type = 'ExternalFirewall' AND removed IS NULL");) {
+ try(PreparedStatement sel_id_hostdc_pstmt = conn.prepareStatement("SELECT id FROM host WHERE data_center_id=? AND type = 'ExternalFirewall' AND removed IS NULL")) {
sel_id_hostdc_pstmt.setLong(1, zoneId);
- try(ResultSet sel_id_hostdc_pstmt_rs = sel_id_hostdc_pstmt.executeQuery();) {
+ try(ResultSet sel_id_hostdc_pstmt_rs = sel_id_hostdc_pstmt.executeQuery()) {
if (sel_id_hostdc_pstmt_rs.first()) {
srxHostId = sel_id_hostdc_pstmt_rs.getLong(1);
} else {
@@ -1651,9 +1591,9 @@ private void fixZoneUsingExternalDevices(Connection conn) {
throw new CloudRuntimeException("fixZoneUsingExternalDevices:Exception:"+e.getMessage(), e);
}
- try(PreparedStatement sel_id_ext_frwl_pstmt = conn.prepareStatement("SELECT id FROM external_firewall_devices WHERE host_id=?");) {
+ try(PreparedStatement sel_id_ext_frwl_pstmt = conn.prepareStatement("SELECT id FROM external_firewall_devices WHERE host_id=?")) {
sel_id_ext_frwl_pstmt.setLong(1, srxHostId);
- try(ResultSet sel_id_ext_frwl_pstmt_rs = sel_id_ext_frwl_pstmt.executeQuery();) {
+ try(ResultSet sel_id_ext_frwl_pstmt_rs = sel_id_ext_frwl_pstmt.executeQuery()) {
if (sel_id_ext_frwl_pstmt_rs.first()) {
srxDevivceId = sel_id_ext_frwl_pstmt_rs.getLong(1);
} else {
@@ -1669,10 +1609,10 @@ private void fixZoneUsingExternalDevices(Connection conn) {
// check if network any uses F5 or SRX devices in the zone
try(PreparedStatement sel_id_cloud_pstmt =
- conn.prepareStatement("select id from `cloud`.`networks` where guest_type='Virtual' and data_center_id=? and network_offering_id=? and removed IS NULL");) {
+ conn.prepareStatement("select id from `cloud`.`networks` where guest_type='Virtual' and data_center_id=? and network_offering_id=? and removed IS NULL")) {
sel_id_cloud_pstmt.setLong(1, zoneId);
sel_id_cloud_pstmt.setLong(2, networkOfferingId);
- try(ResultSet sel_id_cloud_pstmt_rs = sel_id_cloud_pstmt.executeQuery();) {
+ try(ResultSet sel_id_cloud_pstmt_rs = sel_id_cloud_pstmt.executeQuery()) {
while (sel_id_cloud_pstmt_rs.next()) {
// get the network Id
networkId = sel_id_cloud_pstmt_rs.getLong(1);
@@ -1680,7 +1620,7 @@ private void fixZoneUsingExternalDevices(Connection conn) {
// add mapping for the network in network_external_lb_device_map
String insertLbMapping =
"INSERT INTO `cloud`.`network_external_lb_device_map` (uuid, network_id, external_load_balancer_device_id, created) VALUES ( ?, ?, ?, now())";
- try (PreparedStatement insert_lb_stmt = conn.prepareStatement(insertLbMapping);) {
+ try (PreparedStatement insert_lb_stmt = conn.prepareStatement(insertLbMapping)) {
insert_lb_stmt.setString(1, UUID.randomUUID().toString());
insert_lb_stmt.setLong(2, networkId);
insert_lb_stmt.setLong(3, f5DeviceId);
@@ -1688,12 +1628,12 @@ private void fixZoneUsingExternalDevices(Connection conn) {
} catch (SQLException e) {
throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e);
}
- logger.debug("Successfully added entry in network_external_lb_device_map for network " + networkId + " and F5 device ID " + f5DeviceId);
+ logger.debug("Successfully added entry in network_external_lb_device_map for network {} and F5 device ID {}", networkId, f5DeviceId);
// add mapping for the network in network_external_firewall_device_map
String insertFwMapping =
"INSERT INTO `cloud`.`network_external_firewall_device_map` (uuid, network_id, external_firewall_device_id, created) VALUES ( ?, ?, ?, now())";
- try (PreparedStatement insert_ext_firewall_stmt = conn.prepareStatement(insertFwMapping);) {
+ try (PreparedStatement insert_ext_firewall_stmt = conn.prepareStatement(insertFwMapping)) {
insert_ext_firewall_stmt.setString(1, UUID.randomUUID().toString());
insert_ext_firewall_stmt.setLong(2, networkId);
insert_ext_firewall_stmt.setLong(3, srxDevivceId);
@@ -1701,7 +1641,7 @@ private void fixZoneUsingExternalDevices(Connection conn) {
} catch (SQLException e) {
throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e);
}
- logger.debug("Successfully added entry in network_external_firewall_device_map for network " + networkId + " and SRX device ID " + srxDevivceId);
+ logger.debug("Successfully added entry in network_external_firewall_device_map for network {} and SRX device ID {}", networkId, srxDevivceId);
}
}catch (SQLException e) {
throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e);
@@ -1711,10 +1651,10 @@ private void fixZoneUsingExternalDevices(Connection conn) {
}
// update host details for F5 and SRX devices
logger.debug("Updating the host details for F5 and SRX devices");
- try(PreparedStatement sel_pstmt = conn.prepareStatement("SELECT host_id, name FROM `cloud`.`host_details` WHERE host_id=? OR host_id=?");) {
+ try(PreparedStatement sel_pstmt = conn.prepareStatement("SELECT host_id, name FROM `cloud`.`host_details` WHERE host_id=? OR host_id=?")) {
sel_pstmt.setLong(1, f5HostId);
sel_pstmt.setLong(2, srxHostId);
- try(ResultSet sel_rs = sel_pstmt.executeQuery();) {
+ try(ResultSet sel_rs = sel_pstmt.executeQuery()) {
while (sel_rs.next()) {
long hostId = sel_rs.getLong(1);
String camlCaseName = sel_rs.getString(2);
@@ -1723,7 +1663,7 @@ private void fixZoneUsingExternalDevices(Connection conn) {
continue;
}
String lowerCaseName = camlCaseName.toLowerCase();
- try (PreparedStatement update_pstmt = conn.prepareStatement("update `cloud`.`host_details` set name=? where host_id=? AND name=?");) {
+ try (PreparedStatement update_pstmt = conn.prepareStatement("update `cloud`.`host_details` set name=? where host_id=? AND name=?")) {
update_pstmt.setString(1, lowerCaseName);
update_pstmt.setLong(2, hostId);
update_pstmt.setString(3, camlCaseName);
@@ -1750,7 +1690,6 @@ private void fixZoneUsingExternalDevices(Connection conn) {
private void migrateSecondaryStorageToImageStore(Connection conn) {
String sqlSelectS3Count = "select count(*) from `cloud`.`s3`";
String sqlSelectSwiftCount = "select count(*) from `cloud`.`swift`";
- String sqlInsertStoreDetail = "INSERT INTO `cloud`.`image_store_details` (store_id, name, value) values(?, ?, ?)";
String sqlUpdateHostAsRemoved = "UPDATE `cloud`.`host` SET removed = now() WHERE type = 'SecondaryStorage' and removed is null";
logger.debug("Migrating secondary storage to image store");
@@ -1758,7 +1697,6 @@ private void migrateSecondaryStorageToImageStore(Connection conn) {
try (
PreparedStatement pstmtSelectS3Count = conn.prepareStatement(sqlSelectS3Count);
PreparedStatement pstmtSelectSwiftCount = conn.prepareStatement(sqlSelectSwiftCount);
- PreparedStatement storeDetailInsert = conn.prepareStatement(sqlInsertStoreDetail);
PreparedStatement storeInsert =
conn.prepareStatement("INSERT INTO `cloud`.`image_store` (id, uuid, name, image_provider_name, protocol, url, data_center_id, scope, role, parent, total_size, created, removed) values(?, ?, ?, 'NFS', 'nfs', ?, ?, 'ZONE', ?, ?, ?, ?, ?)");
PreparedStatement nfsQuery =
@@ -1766,8 +1704,8 @@ private void migrateSecondaryStorageToImageStore(Connection conn) {
PreparedStatement pstmtUpdateHostAsRemoved = conn.prepareStatement(sqlUpdateHostAsRemoved);
ResultSet rsSelectS3Count = pstmtSelectS3Count.executeQuery();
ResultSet rsSelectSwiftCount = pstmtSelectSwiftCount.executeQuery();
- ResultSet rsNfs = nfsQuery.executeQuery();
- ) {
+ ResultSet rsNfs = nfsQuery.executeQuery()
+ ) {
logger.debug("Checking if we need to migrate NFS secondary storage to image store or staging store");
int numRows = 0;
if (rsSelectS3Count.next()) {
@@ -1786,11 +1724,11 @@ private void migrateSecondaryStorageToImageStore(Connection conn) {
store_role = "ImageCache";
}
- logger.debug("Migrating NFS secondary storage to " + store_role + " store");
+ logger.debug("Migrating NFS secondary storage to {} store", store_role);
// migrate NFS secondary storage, for nfs, keep previous host_id as the store_id
while (rsNfs.next()) {
- Long nfs_id = rsNfs.getLong("id");
+ long nfs_id = rsNfs.getLong("id");
String nfs_uuid = rsNfs.getString("uuid");
String nfs_url = rsNfs.getString("url");
String nfs_parent = rsNfs.getString("parent");
@@ -1832,19 +1770,19 @@ private void migrateSecondaryStorageToImageStore(Connection conn) {
private void migrateVolumeHostRef(Connection conn) {
logger.debug("Updating volume_store_ref table from volume_host_ref table");
try(PreparedStatement volStoreInsert =
- conn.prepareStatement("INSERT INTO `cloud`.`volume_store_ref` (store_id, volume_id, zone_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, checksum, error_str, local_path, install_path, url, destroyed, update_count, ref_cnt, state) select host_id, volume_id, zone_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, checksum, error_str, local_path, install_path, url, destroyed, 0, 0, 'Allocated' from `cloud`.`volume_host_ref`");)
+ conn.prepareStatement("INSERT INTO `cloud`.`volume_store_ref` (store_id, volume_id, zone_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, checksum, error_str, local_path, install_path, url, destroyed, update_count, ref_cnt, state) select host_id, volume_id, zone_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, checksum, error_str, local_path, install_path, url, destroyed, 0, 0, 'Allocated' from `cloud`.`volume_host_ref`"))
{
int rowCount = volStoreInsert.executeUpdate();
- logger.debug("Insert modified " + rowCount + " rows");
- try(PreparedStatement volStoreUpdate = conn.prepareStatement("update `cloud`.`volume_store_ref` set state = 'Ready' where download_state = 'DOWNLOADED'");) {
+ logger.debug(INSERT_MODIFIED_ROWS, rowCount);
+ try(PreparedStatement volStoreUpdate = conn.prepareStatement("update `cloud`.`volume_store_ref` set state = 'Ready' where download_state = 'DOWNLOADED'")) {
rowCount = volStoreUpdate.executeUpdate();
- logger.debug("Update modified " + rowCount + " rows");
+ logger.debug(UPDATE_MODIFIED_ROWS, rowCount);
}catch (SQLException e) {
- logger.error("Unable to migrate volume_host_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate volume_host_ref.", e);
throw new CloudRuntimeException("Unable to migrate volume_host_ref." + e.getMessage(),e);
}
} catch (SQLException e) {
- logger.error("Unable to migrate volume_host_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate volume_host_ref.", e);
throw new CloudRuntimeException("Unable to migrate volume_host_ref." + e.getMessage(),e);
}
logger.debug("Completed updating volume_store_ref table from volume_host_ref table");
@@ -1854,20 +1792,20 @@ private void migrateVolumeHostRef(Connection conn) {
private void migrateTemplateHostRef(Connection conn) {
logger.debug("Updating template_store_ref table from template_host_ref table");
try (PreparedStatement tmplStoreInsert =
- conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, error_str, local_path, install_path, url, destroyed, is_copy, update_count, ref_cnt, store_role, state) select host_id, template_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, error_str, local_path, install_path, url, destroyed, is_copy, 0, 0, 'Image', 'Allocated' from `cloud`.`template_host_ref`");)
+ conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, error_str, local_path, install_path, url, destroyed, is_copy, update_count, ref_cnt, store_role, state) select host_id, template_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, error_str, local_path, install_path, url, destroyed, is_copy, 0, 0, 'Image', 'Allocated' from `cloud`.`template_host_ref`"))
{
int rowCount = tmplStoreInsert.executeUpdate();
- logger.debug("Insert modified " + rowCount + " rows");
+ logger.debug(INSERT_MODIFIED_ROWS, rowCount);
- try(PreparedStatement tmplStoreUpdate = conn.prepareStatement("update `cloud`.`template_store_ref` set state = 'Ready' where download_state = 'DOWNLOADED'");) {
+ try(PreparedStatement tmplStoreUpdate = conn.prepareStatement("update `cloud`.`template_store_ref` set state = 'Ready' where download_state = 'DOWNLOADED'")) {
rowCount = tmplStoreUpdate.executeUpdate();
}catch (SQLException e) {
- logger.error("Unable to migrate template_host_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate template_host_ref.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to migrate template_host_ref." + e.getMessage(), e);
}
- logger.debug("Update modified " + rowCount + " rows");
+ logger.debug(UPDATE_MODIFIED_ROWS, rowCount);
} catch (SQLException e) {
- logger.error("Unable to migrate template_host_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate template_host_ref.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to migrate template_host_ref." + e.getMessage(), e);
}
logger.debug("Completed updating template_store_ref table from template_host_ref table");
@@ -1877,22 +1815,22 @@ private void migrateTemplateHostRef(Connection conn) {
private void migrateSnapshotStoreRef(Connection conn) {
logger.debug("Updating snapshot_store_ref table from snapshots table");
try(PreparedStatement snapshotStoreInsert =
- conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) select sechost_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and sechost_id is not null and removed is null");
+ conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) select sechost_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and sechost_id is not null and removed is null")
) {
//Update all snapshots except KVM snapshots
int rowCount = snapshotStoreInsert.executeUpdate();
- logger.debug("Inserted " + rowCount + " snapshots into snapshot_store_ref");
+ logger.debug("Inserted {} snapshots into snapshot_store_ref", rowCount);
//backsnap_id for KVM snapshots is complete path. CONCAT is not required
try(PreparedStatement snapshotStoreInsert_2 =
- conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) select sechost_id, id, created, size, prev_snap_id, backup_snap_id, volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type = 'KVM' and sechost_id is not null and removed is null");) {
+ conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) select sechost_id, id, created, size, prev_snap_id, backup_snap_id, volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type = 'KVM' and sechost_id is not null and removed is null")) {
rowCount = snapshotStoreInsert_2.executeUpdate();
- logger.debug("Inserted " + rowCount + " KVM snapshots into snapshot_store_ref");
+ logger.debug("Inserted {} KVM snapshots into snapshot_store_ref", rowCount);
}catch (SQLException e) {
- logger.error("Unable to migrate snapshot_store_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate snapshot_store_ref.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to migrate snapshot_store_ref." + e.getMessage(),e);
}
} catch (SQLException e) {
- logger.error("Unable to migrate snapshot_store_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate snapshot_store_ref.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to migrate snapshot_store_ref." + e.getMessage(),e);
}
logger.debug("Completed updating snapshot_store_ref table from snapshots table");
@@ -1901,7 +1839,7 @@ private void migrateSnapshotStoreRef(Connection conn) {
// migrate secondary storages S3 from s3 tables to image_store table
private void migrateS3ToImageStore(Connection conn) {
Long storeId = null;
- Map s3_store_id_map = new HashMap();
+ Map s3_store_id_map = new HashMap<>();
logger.debug("Migrating S3 to image store");
try (
@@ -1913,8 +1851,8 @@ private void migrateS3ToImageStore(Connection conn) {
"values(?, ?, 'S3', ?, 'REGION', 'Image', ?)");
PreparedStatement s3Query = conn.prepareStatement("select id, uuid, access_key, secret_key, end_point, bucket, https, connection_timeout, " +
"max_error_retry, socket_timeout, created from `cloud`.`s3`");
- ResultSet rs = s3Query.executeQuery();
- ) {
+ ResultSet rs = s3Query.executeQuery()
+ ) {
while (rs.next()) {
Long s3_id = rs.getLong("id");
@@ -1923,7 +1861,7 @@ private void migrateS3ToImageStore(Connection conn) {
String s3_secretkey = rs.getString("secret_key");
String s3_endpoint = rs.getString("end_point");
String s3_bucket = rs.getString("bucket");
- boolean s3_https = rs.getObject("https") != null ? (rs.getInt("https") == 0 ? false : true) : false;
+ boolean s3_https = rs.getObject("https") != null && (rs.getInt("https") != 0);
Integer s3_connectiontimeout = rs.getObject("connection_timeout") != null ? rs.getInt("connection_timeout") : null;
Integer s3_retry = rs.getObject("max_error_retry") != null ? rs.getInt("max_error_retry") : null;
Integer s3_sockettimeout = rs.getObject("socket_timeout") != null ? rs.getInt("socket_timeout") : null;
@@ -1939,13 +1877,13 @@ private void migrateS3ToImageStore(Connection conn) {
storeInsert.executeUpdate();
storeQuery.setString(1, s3_uuid);
- try (ResultSet storeInfo = storeQuery.executeQuery();) {
+ try (ResultSet storeInfo = storeQuery.executeQuery()) {
if (storeInfo.next()) {
storeId = storeInfo.getLong("id");
}
}
- Map detailMap = new HashMap();
+ Map detailMap = new HashMap<>();
detailMap.put(ApiConstants.S3_ACCESS_KEY, s3_accesskey);
detailMap.put(ApiConstants.S3_SECRET_KEY, s3_secretkey);
detailMap.put(ApiConstants.S3_BUCKET_NAME, s3_bucket);
@@ -1961,9 +1899,7 @@ private void migrateS3ToImageStore(Connection conn) {
detailMap.put(ApiConstants.S3_SOCKET_TIMEOUT, String.valueOf(s3_sockettimeout));
}
- Iterator keyIt = detailMap.keySet().iterator();
- while (keyIt.hasNext()) {
- String key = keyIt.next();
+ for (String key : detailMap.keySet()) {
String val = detailMap.get(key);
storeDetailInsert.setLong(1, storeId);
storeDetailInsert.setString(2, key);
@@ -1991,18 +1927,18 @@ private void migrateS3ToImageStore(Connection conn) {
private void migrateTemplateS3Ref(Connection conn, Map s3StoreMap) {
logger.debug("Updating template_store_ref table from template_s3_ref table");
try(PreparedStatement tmplStoreInsert =
- conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, download_pct, size, physical_size, download_state, local_path, install_path, update_count, ref_cnt, store_role, state) values(?, ?, ?, 100, ?, ?, 'DOWNLOADED', '?', '?', 0, 0, 'Image', 'Ready')");
+ conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, download_pct, size, physical_size, download_state, local_path, install_path, update_count, ref_cnt, store_role, state) values(?, ?, ?, 100, ?, ?, 'DOWNLOADED', '?', '?', 0, 0, 'Image', 'Ready')")
) {
try(PreparedStatement s3Query =
- conn.prepareStatement("select template_s3_ref.s3_id, template_s3_ref.template_id, template_s3_ref.created, template_s3_ref.size, template_s3_ref.physical_size, vm_template.account_id from `cloud`.`template_s3_ref`, `cloud`.`vm_template` where vm_template.id = template_s3_ref.template_id");) {
- try(ResultSet rs = s3Query.executeQuery();) {
+ conn.prepareStatement("select template_s3_ref.s3_id, template_s3_ref.template_id, template_s3_ref.created, template_s3_ref.size, template_s3_ref.physical_size, vm_template.account_id from `cloud`.`template_s3_ref`, `cloud`.`vm_template` where vm_template.id = template_s3_ref.template_id")) {
+ try(ResultSet rs = s3Query.executeQuery()) {
while (rs.next()) {
Long s3_id = rs.getLong("s3_id");
- Long s3_tmpl_id = rs.getLong("template_id");
+ long s3_tmpl_id = rs.getLong("template_id");
Date s3_created = rs.getDate("created");
Long s3_size = rs.getObject("size") != null ? rs.getLong("size") : null;
Long s3_psize = rs.getObject("physical_size") != null ? rs.getLong("physical_size") : null;
- Long account_id = rs.getLong("account_id");
+ long account_id = rs.getLong("account_id");
tmplStoreInsert.setLong(1, s3StoreMap.get(s3_id));
tmplStoreInsert.setLong(2, s3_tmpl_id);
tmplStoreInsert.setDate(3, s3_created);
@@ -2022,15 +1958,15 @@ private void migrateTemplateS3Ref(Connection conn, Map s3StoreMap) {
tmplStoreInsert.executeUpdate();
}
}catch (SQLException e) {
- logger.error("Unable to migrate template_s3_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate template_s3_ref.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to migrate template_s3_ref." + e.getMessage(),e);
}
}catch (SQLException e) {
- logger.error("Unable to migrate template_s3_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate template_s3_ref.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to migrate template_s3_ref." + e.getMessage(),e);
}
} catch (SQLException e) {
- logger.error("Unable to migrate template_s3_ref." + e.getMessage(),e);
+ logger.error("Unable to migrate template_s3_ref.{}", e.getMessage(), e);
throw new CloudRuntimeException("Unable to migrate template_s3_ref." + e.getMessage(),e);
}
logger.debug("Completed migrating template_s3_ref table.");
@@ -2040,19 +1976,19 @@ private void migrateTemplateS3Ref(Connection conn, Map s3StoreMap) {
private void migrateSnapshotS3Ref(Connection conn, Map s3StoreMap) {
logger.debug("Updating snapshot_store_ref table from snapshots table for s3");
try(PreparedStatement snapshotStoreInsert =
- conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) values(?, ?, ?, ?, ?, ?, ?, 0, 0, 'Image', 'Ready')");
+ conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) values(?, ?, ?, ?, ?, ?, ?, 0, 0, 'Image', 'Ready')")
) {
try(PreparedStatement s3Query =
- conn.prepareStatement("select s3_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and s3_id is not null and removed is null");) {
- try(ResultSet rs = s3Query.executeQuery();) {
+ conn.prepareStatement("select s3_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and s3_id is not null and removed is null")) {
+ try(ResultSet rs = s3Query.executeQuery()) {
while (rs.next()) {
Long s3_id = rs.getLong("s3_id");
- Long snapshot_id = rs.getLong("id");
+ long snapshot_id = rs.getLong("id");
Date s3_created = rs.getDate("created");
Long s3_size = rs.getObject("size") != null ? rs.getLong("size") : null;
Long s3_prev_id = rs.getObject("prev_snap_id") != null ? rs.getLong("prev_snap_id") : null;
String install_path = rs.getString(6);
- Long s3_vol_id = rs.getLong("volume_id");
+ long s3_vol_id = rs.getLong("volume_id");
snapshotStoreInsert.setLong(1, s3StoreMap.get(s3_id));
snapshotStoreInsert.setLong(2, snapshot_id);
@@ -2072,15 +2008,15 @@ private void migrateSnapshotS3Ref(Connection conn, Map s3StoreMap) {
snapshotStoreInsert.executeUpdate();
}
}catch (SQLException e) {
- logger.error("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e);
+ logger.error("migrateSnapshotS3Ref:Exception:{}", e.getMessage(), e);
throw new CloudRuntimeException("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e);
}
}catch (SQLException e) {
- logger.error("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e);
+ logger.error("migrateSnapshotS3Ref:Exception:{}", e.getMessage(), e);
throw new CloudRuntimeException("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e);
}
} catch (SQLException e) {
- logger.error("Unable to migrate s3 backedup snapshots to snapshot_store_ref." + e.getMessage());
+ logger.error("Unable to migrate s3 backedup snapshots to snapshot_store_ref.{}", e.getMessage());
throw new CloudRuntimeException("Unable to migrate s3 backedup snapshots to snapshot_store_ref." + e.getMessage(), e);
}
logger.debug("Completed updating snapshot_store_ref table from s3 snapshots entries");
@@ -2089,7 +2025,7 @@ private void migrateSnapshotS3Ref(Connection conn, Map s3StoreMap) {
// migrate secondary storages Swift from swift tables to image_store table
private void migrateSwiftToImageStore(Connection conn) {
Long storeId = null;
- Map swift_store_id_map = new HashMap();
+ Map swift_store_id_map = new HashMap<>();
logger.debug("Migrating Swift to image store");
try (
@@ -2100,8 +2036,8 @@ private void migrateSwiftToImageStore(Connection conn) {
PreparedStatement storeInsert =
conn.prepareStatement("INSERT INTO `cloud`.`image_store` (uuid, name, image_provider_name, protocol, url, scope, role, created) values(?, ?, 'Swift', 'http', ?, 'REGION', 'Image', ?)");
PreparedStatement swiftQuery = conn.prepareStatement("select id, uuid, url, account, username, swift.key, created from `cloud`.`swift`");
- ResultSet rs = swiftQuery.executeQuery();
- ) {
+ ResultSet rs = swiftQuery.executeQuery()
+ ) {
while (rs.next()) {
Long swift_id = rs.getLong("id");
String swift_uuid = rs.getString("uuid");
@@ -2120,20 +2056,18 @@ private void migrateSwiftToImageStore(Connection conn) {
storeInsert.executeUpdate();
storeQuery.setString(1, swift_uuid);
- try (ResultSet storeInfo = storeQuery.executeQuery();) {
+ try (ResultSet storeInfo = storeQuery.executeQuery()) {
if (storeInfo.next()) {
storeId = storeInfo.getLong("id");
}
}
- Map detailMap = new HashMap();
+ Map detailMap = new HashMap<>();
detailMap.put(ApiConstants.ACCOUNT, swift_account);
detailMap.put(ApiConstants.USERNAME, swift_username);
detailMap.put(ApiConstants.KEY, swift_key);
- Iterator keyIt = detailMap.keySet().iterator();
- while (keyIt.hasNext()) {
- String key = keyIt.next();
+ for (String key : detailMap.keySet()) {
String val = detailMap.get(key);
storeDetailInsert.setLong(1, storeId);
storeDetailInsert.setString(2, key);
@@ -2164,11 +2098,11 @@ private void migrateTemplateSwiftRef(Connection conn, Map swiftStore
PreparedStatement tmplStoreInsert =
conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, download_pct, size, physical_size, download_state, local_path, install_path, update_count, ref_cnt, store_role, state) values(?, ?, ?, 100, ?, ?, 'DOWNLOADED', '?', '?', 0, 0, 'Image', 'Ready')");
PreparedStatement s3Query = conn.prepareStatement("select swift_id, template_id, created, path, size, physical_size from `cloud`.`template_swift_ref`");
- ResultSet rs = s3Query.executeQuery();
- ) {
+ ResultSet rs = s3Query.executeQuery()
+ ) {
while (rs.next()) {
Long swift_id = rs.getLong("swift_id");
- Long tmpl_id = rs.getLong("template_id");
+ long tmpl_id = rs.getLong("template_id");
Date created = rs.getDate("created");
String path = rs.getString("path");
Long size = rs.getObject("size") != null ? rs.getLong("size") : null;
@@ -2203,19 +2137,19 @@ private void migrateTemplateSwiftRef(Connection conn, Map swiftStore
private void migrateSnapshotSwiftRef(Connection conn, Map swiftStoreMap) {
logger.debug("Updating snapshot_store_ref table from snapshots table for swift");
try (PreparedStatement snapshotStoreInsert =
- conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) values(?, ?, ?, ?, ?, ?, ?, 0, 0, 'Image', 'Ready')");
+ conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) values(?, ?, ?, ?, ?, ?, ?, 0, 0, 'Image', 'Ready')")
){
try(PreparedStatement s3Query =
- conn.prepareStatement("select swift_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and swift_id is not null and removed is null");) {
- try(ResultSet rs = s3Query.executeQuery();) {
+ conn.prepareStatement("select swift_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and swift_id is not null and removed is null")) {
+ try(ResultSet rs = s3Query.executeQuery()) {
while (rs.next()) {
Long swift_id = rs.getLong("swift_id");
- Long snapshot_id = rs.getLong("id");
+ long snapshot_id = rs.getLong("id");
Date created = rs.getDate("created");
- Long size = rs.getLong("size");
- Long prev_id = rs.getLong("prev_snap_id");
+ long size = rs.getLong("size");
+ long prev_id = rs.getLong("prev_snap_id");
String install_path = rs.getString(6);
- Long vol_id = rs.getLong("volume_id");
+ long vol_id = rs.getLong("volume_id");
snapshotStoreInsert.setLong(1, swiftStoreMap.get(swift_id));
snapshotStoreInsert.setLong(2, snapshot_id);
@@ -2227,15 +2161,15 @@ private void migrateSnapshotSwiftRef(Connection conn, Map swiftStore
snapshotStoreInsert.executeUpdate();
}
}catch (SQLException e) {
- logger.error("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e);
+ logger.error("migrateSnapshotSwiftRef:Exception:{}", e.getMessage(), e);
throw new CloudRuntimeException("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e);
}
}catch (SQLException e) {
- logger.error("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e);
+ logger.error("migrateSnapshotSwiftRef:Exception:{}", e.getMessage(), e);
throw new CloudRuntimeException("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e);
}
} catch (SQLException e) {
- logger.error("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e);
+ logger.error("migrateSnapshotSwiftRef:Exception:{}", e.getMessage(), e);
throw new CloudRuntimeException("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e);
}
logger.debug("Completed updating snapshot_store_ref table from swift snapshots entries");
@@ -2243,12 +2177,12 @@ private void migrateSnapshotSwiftRef(Connection conn, Map swiftStore
private void fixNiciraKeys(Connection conn) {
//First drop the key if it exists.
- List keys = new ArrayList();
+ List keys = new ArrayList<>();
logger.debug("Dropping foreign key fk_nicira_nvp_nic_map__nic from the table nicira_nvp_nic_map if it exists");
keys.add("fk_nicira_nvp_nic_map__nic");
DbUpgradeUtils.dropKeysIfExist(conn, "nicira_nvp_nic_map", keys, true);
//Now add foreign key.
- try(PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`nicira_nvp_nic_map` ADD CONSTRAINT `fk_nicira_nvp_nic_map__nic` FOREIGN KEY (`nic`) REFERENCES `nics` (`uuid`) ON DELETE CASCADE");)
+ try(PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`nicira_nvp_nic_map` ADD CONSTRAINT `fk_nicira_nvp_nic_map__nic` FOREIGN KEY (`nic`) REFERENCES `nics` (`uuid`) ON DELETE CASCADE"))
{
pstmt.executeUpdate();
logger.debug("Added foreign key fk_nicira_nvp_nic_map__nic to the table nicira_nvp_nic_map");
@@ -2259,13 +2193,13 @@ private void fixNiciraKeys(Connection conn) {
private void fixRouterKeys(Connection conn) {
//First drop the key if it exists.
- List keys = new ArrayList();
+ List