diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000000..2af9f54edc44 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,22 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +/plugins/storage/volume/linstor @rp- +/plugins/storage/volume/storpool @slavkap + +.pre-commit-config.yaml @jbampton +/.github/linters/ @jbampton diff --git a/.github/ISSUE_TEMPLATE/feature_request.yaml b/.github/ISSUE_TEMPLATE/feature_request.yml similarity index 100% rename from .github/ISSUE_TEMPLATE/feature_request.yaml rename to .github/ISSUE_TEMPLATE/feature_request.yml diff --git a/.github/boring-cyborg.yml b/.github/boring-cyborg.yml index dfab81d0f79d..90fd24c8180a 100644 --- a/.github/boring-cyborg.yml +++ b/.github/boring-cyborg.yml @@ -46,8 +46,6 @@ labelPRBasedOnFilePath: "component:dpdk": - server/src/main/java/com/cloud/hypervisor/kvm/dpdk/* - plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/dpdk/* - "component:hyperv": - - plugins/hypervisors/hyperv/* "component:integration-test": - test/integration/* "component:ipv6": diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000000..41b307863fc3 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,30 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. 
+# Please see the documentation for all configuration options: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: "maven" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "daily" + cooldown: + default-days: 7 diff --git a/.github/linters/.yamllint.yml b/.github/linters/.yamllint.yml index 8a72aee0adbe..97b66848696a 100644 --- a/.github/linters/.yamllint.yml +++ b/.github/linters/.yamllint.yml @@ -15,13 +15,14 @@ # specific language governing permissions and limitations # under the License. --- -extends: relaxed +extends: default rules: line-length: max: 400 # Very forgiving for GitHub Actions and infrastructure files indentation: disable # Disable indentation checking for existing files comments: disable # Disable comment formatting checks + braces: disable brackets: disable # Disable bracket spacing checks colons: max-spaces-after: -1 # Allow any number of spaces after colon diff --git a/.github/linters/codespell.txt b/.github/linters/codespell.txt index 27761c70f446..67cbeaa7cbb3 100644 --- a/.github/linters/codespell.txt +++ b/.github/linters/codespell.txt @@ -4,6 +4,7 @@ acount actuall acuiring acumulate +addin addreess addtion adminstrator @@ -12,10 +13,8 @@ afrer afterall againt ags -aktive algoritm allo -alloacate allocted alocation alogrithm @@ -65,6 +64,7 @@ bject boardcast bootstraper bu +callin cant capabilites capablity @@ -73,6 +73,7 @@ carrefully cavaet chaing checkd +checkin childs choosen chould @@ -93,7 +94,6 @@ confg configruation configuable conneciton -connexion constrait constraits containg @@ -101,9 +101,7 @@ contex continuesly contro controler -controles controll -convienient convinience coputer correcponding @@ -158,13 +156,13 @@ differnet differnt direcotry directroy -disale disbale discrepency disover dissapper dissassociated divice +dockin doesn' doesnot doesnt @@ -175,7 +173,6 @@ eanbled earch ect elemnt -eles elments emmited enble @@ -187,22 +184,19 @@ environmnet equivalant erro erronous -everthing everytime excute execept execption +exects execut executeable exeeded exisitng exisits -existin existsing -exitting expcted expection -explaination explicitely faield faild @@ -215,7 +209,6 @@ fillled findout fisrt fo -folowing fowarding frist fro @@ -234,6 +227,7 @@ hanling happend hasing hasnt +havin hda hostanme hould @@ -253,20 +247,14 @@ implmeneted implmentation incase includeing -incosistency indecates -indien infor informations informaton -infrastrcuture ingore -inital initalize initator -initilization inspite -instace instal instnace intefaces @@ -284,12 +272,8 @@ ist klunky lable leve -lief limite -linke listner -lokal -lokales maintainence maintenace maintenence @@ -298,7 +282,6 @@ mambers manaully manuel maxium -mehtod mergable mesage messge @@ -308,7 +291,6 @@ minumum mis modifers mor -mot mulitply multipl multple @@ -322,7 +304,7 @@ nin nodel nome noone -nowe +notin numbe numer occured @@ -390,12 +372,9 @@ remaning remore remvoing renabling -repeatly reponse reqest reqiured -requieres -requried reserv reserverd reseted @@ -414,14 +393,13 @@ retuned returing rever rocessor +roperty runing runnign sate scalled -scipt scirpt scrip -seconadry seconday seesion sepcified @@ -434,12 +412,10 @@ settig sevices shoul shoule -sie signle simplier singature skiping -snaphsot snpashot specied specifed @@ -450,7 +426,6 @@ standy statics stickyness stil -stip storeage strat streched @@ -459,7 +434,6 @@ 
succesfull successfull suceessful suces -sucessfully suiteable suppots suppport @@ -492,7 +466,6 @@ uncompressible uneccessarily unexepected unexpect -unknow unkonw unkown unneccessary @@ -500,14 +473,12 @@ unparseable unrecoginized unsupport unxpected -updat uptodate usera usign usin utlization vaidate -valiate valule valus varibles @@ -516,8 +487,6 @@ verfying verifing virutal visable -wakup wil wit -wll wth diff --git a/.github/workflows/main-sonar-check.yml b/.github/workflows/main-sonar-check.yml index 70cc3fbe19f3..224ea2cde801 100644 --- a/.github/workflows/main-sonar-check.yml +++ b/.github/workflows/main-sonar-check.yml @@ -44,14 +44,14 @@ jobs: cache: 'maven' - name: Cache SonarCloud packages - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ~/.sonar/cache key: ${{ runner.os }}-sonar restore-keys: ${{ runner.os }}-sonar - name: Cache local Maven repository - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ~/.m2/repository key: ${{ runner.os }}-m2-${{ hashFiles('pom.xml', '*/pom.xml', '*/*/pom.xml', '*/*/*/pom.xml') }} diff --git a/.github/workflows/linter.yml b/.github/workflows/pre-commit.yml similarity index 90% rename from .github/workflows/linter.yml rename to .github/workflows/pre-commit.yml index 6ff997412ff8..11fe5c068814 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/pre-commit.yml @@ -39,11 +39,11 @@ jobs: pip install pre-commit - name: Set PY run: echo "PY=$(python -VV | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV - - uses: actions/cache@v4 + - uses: actions/cache@v5 with: path: ~/.cache/pre-commit key: pre-commit|${{ env.PY }}|${{ hashFiles('.pre-commit-config.yaml') }} - name: Run pre-commit - run: pre-commit run --all-files + run: pre-commit run --color=always --all-files - name: Run manual pre-commit hooks - run: pre-commit run --all-files --hook-stage manual + run: pre-commit run --color=always --all-files --hook-stage manual diff --git a/.github/workflows/sonar-check.yml b/.github/workflows/sonar-check.yml index 46bfdd7d015c..31fb671cc58f 100644 --- a/.github/workflows/sonar-check.yml +++ b/.github/workflows/sonar-check.yml @@ -46,14 +46,14 @@ jobs: cache: 'maven' - name: Cache SonarCloud packages - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ~/.sonar/cache key: ${{ runner.os }}-sonar restore-keys: ${{ runner.os }}-sonar - name: Cache local Maven repository - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: ~/.m2/repository key: ${{ runner.os }}-m2-${{ hashFiles('pom.xml', '*/pom.xml', '*/*/pom.xml', '*/*/*/pom.xml') }} diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 000000000000..e90c75979b6d --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,43 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +name: 'Close stale issues and PRs' +on: + schedule: + - cron: '30 1 * * *' + +jobs: + stale: + runs-on: ubuntu-latest + permissions: + actions: write + issues: write + pull-requests: write + steps: + - uses: actions/stale@v10 + with: + stale-issue-message: 'This issue is stale because it has been open for 120 days with no activity. It may be removed by administrators of this project at any time. Remove the stale label or comment to request for removal of it to prevent this.' + stale-pr-message: 'This PR is stale because it has been open for 120 days with no activity. It may be removed by administrators of this project at any time. Remove the stale label or comment to request for removal of it to prevent this.' + close-issue-message: 'This issue was closed because it has been stale for 120 days with no activity.' + close-pr-message: 'This PR was closed because it has been stale for 240 days with no activity.' + stale-issue-label: 'no-issue-activity' + stale-pr-label: 'no-pr-activity' + days-before-stale: 120 + days-before-close: -1 + days-before-pr-close: 240 + exempt-issue-labels: 'gsoc,good-first-issue,long-term-plan' + exempt-pr-labels: 'status:ready-for-merge,status:needs-testing,status:on-hold' diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e1a7db702204..49829caf125e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -25,6 +25,12 @@ repos: hooks: - id: identity - id: check-hooks-apply + - repo: https://github.com/thlorenz/doctoc.git + rev: v2.2.0 + hooks: + - id: doctoc + name: Add TOC for Markdown files + files: ^CONTRIBUTING\.md$|^INSTALL\.md$|^README\.md$ - repo: https://github.com/oxipng/oxipng rev: v9.1.5 hooks: @@ -41,6 +47,21 @@ repos: - repo: https://github.com/Lucas-C/pre-commit-hooks rev: v1.5.5 hooks: + - id: chmod + name: set file permissions + args: ['644'] + files: \.md$ + stages: [manual] + - id: insert-license + name: add license for all cfg files + description: automatically adds a licence header to all cfg files that don't have a license header + files: \.cfg$ + args: + - --comment-style + - '|#|' + - --license-filepath + - .github/workflows/license-templates/LICENSE.txt + - --fuzzy-match-generates-todo - id: insert-license name: add license for all Markdown files files: \.md$ @@ -51,6 +72,54 @@ repos: - .github/workflows/license-templates/LICENSE.txt - --fuzzy-match-generates-todo exclude: ^(CHANGES|ISSUE_TEMPLATE|PULL_REQUEST_TEMPLATE)\.md$|^ui/docs/(full|smoke)-test-plan\.template\.md$ + - id: insert-license + name: add license for all properties files + description: automatically adds a licence header to all properties files that don't have a license header + files: \.properties$ + args: + - --comment-style + - '|#|' + - --license-filepath + - .github/workflows/license-templates/LICENSE.txt + - --fuzzy-match-generates-todo + - id: insert-license + name: add license for all Shell files + description: automatically adds a licence header to all Shell files that don't have a license header + files: \.sh$ + args: + - --comment-style + - '|#|' + - --license-filepath + - .github/workflows/license-templates/LICENSE.txt + - --fuzzy-match-generates-todo + - id: insert-license + name: add license for all SQL files + files: \.sql$ + args: + - --comment-style + - '|--|' + - --license-filepath + - .github/workflows/license-templates/LICENSE.txt + - --fuzzy-match-generates-todo + - id: insert-license + name: add license for all Vue files + files: \.vue$ + args: + - --comment-style + - '|//|' + - --license-filepath + - 
.github/workflows/license-templates/LICENSE.txt + - --fuzzy-match-generates-todo + - id: insert-license + name: add license for all YAML files + description: automatically adds a licence header to all YAML files that don't have a license header + files: \.ya?ml$ + args: + - --comment-style + - '|#|' + - --license-filepath + - .github/workflows/license-templates/LICENSE.txt + - --fuzzy-match-generates-todo - repo: https://github.com/pre-commit/pre-commit-hooks rev: v6.0.0 hooks: @@ -84,7 +153,7 @@ repos: ^systemvm/agent/certs/realhostip\.key$| ^test/integration/smoke/test_ssl_offloading\.py$ - id: end-of-file-fixer - exclude: \.vhd$ + exclude: \.vhd$|\.svg$ - id: file-contents-sorter args: [--unique] files: ^\.github/linters/codespell\.txt$ @@ -92,11 +161,11 @@ repos: - id: forbid-submodules - id: mixed-line-ending - id: trailing-whitespace - files: \.(bat|cfg|cs|css|gitignore|header|in|install|java|md|properties|py|rb|rc|sh|sql|te|template|txt|ucls|vue|xml|xsl|yaml|yml)$|^cloud-cli/bindir/cloud-tool$|^debian/changelog$ + files: ^(LICENSE|NOTICE)$|\.(bat|cfg|cs|css|gitignore|header|in|install|java|md|properties|py|rb|rc|sh|sql|te|template|txt|ucls|vue|xml|xsl|yaml|yml)$|^cloud-cli/bindir/cloud-tool$|^debian/changelog$ args: [--markdown-linebreak-ext=md] exclude: ^services/console-proxy/rdpconsole/src/test/doc/freerdp-debug-log\.txt$ - repo: https://github.com/codespell-project/codespell - rev: v2.2.6 + rev: v2.4.1 hooks: - id: codespell name: run codespell @@ -117,14 +186,6 @@ repos: args: [--config=.github/linters/.markdown-lint.yml] types: [markdown] files: \.(md|mdown|markdown)$ - - repo: https://github.com/Lucas-C/pre-commit-hooks - rev: v1.5.5 - hooks: - - id: chmod - name: set file permissions - args: ['644'] - files: \.md$ - stages: [manual] - repo: https://github.com/adrienverge/yamllint rev: v1.37.1 hooks: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 57b7a716cd4a..f0678ed76498 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -21,6 +21,24 @@ ## Summary + + + +- [Summary](#summary) +- [Bug fixes](#bug-fixes) +- [Developing new features](#developing-new-features) +- [PendingReleaseNotes file](#pendingreleasenotes-file) +- [Fork the code](#fork-the-code) +- [Making changes](#making-changes) +- [Rebase `feature_x` to include updates from `upstream/main`](#rebase-feature_x-to-include-updates-from-upstreammain) +- [Make a GitHub Pull Request to contribute your changes](#make-a-github-pull-request-to-contribute-your-changes) +- [Cleaning up after a successful pull request](#cleaning-up-after-a-successful-pull-request) +- [Release Principles](#release-principles) + + + +## Summary + This document covers how to contribute to the ACS project. ACS uses GitHub PRs to manage code contributions. These instructions assume you have a GitHub.com account, so if you don't have one you will have to create one. Your proposed code changes will be published to your own fork of the ACS project, and you will submit a Pull Request for your changes to be added. diff --git a/INSTALL.md b/INSTALL.md index 0619b97f21c1..52f109b0a411 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -26,9 +26,21 @@ or the developer [wiki](https://cwiki.apache.org/confluence/display/CLOUDSTACK/H Apache CloudStack developers use various platforms for development, this guide was tested against a CentOS 7 x86_64 setup. -* [Setting up development environment](https://cwiki.apache.org/confluence/display/CLOUDSTACK/Setting+up+CloudStack+Development+Environment) for Apache CloudStack. 
-* [Building](https://cwiki.apache.org/confluence/display/CLOUDSTACK/How+to+build+CloudStack) Apache CloudStack. -* [Appliance based development](https://github.com/rhtyd/monkeybox) + + + +- [Setting up Development Environment](#setting-up-development-environment) + - [Using jenv and/or pyenv for Version Management](#using-jenv-andor-pyenv-for-version-management) +- [Getting the Source Code](#getting-the-source-code) +- [Building](#building) +- [To bring up CloudStack UI](#to-bring-up-cloudstack-ui) +- [Building with non-redistributable plugins](#building-with-non-redistributable-plugins) +- [Packaging and Installation](#packaging-and-installation) + - [Debian/Ubuntu](#debianubuntu) + - [RHEL/CentOS](#rhelcentos) +- [Notes](#notes) + + ## Setting up Development Environment diff --git a/LICENSE b/LICENSE index 8be7d8083a5e..e61c431f5fad 100644 --- a/LICENSE +++ b/LICENSE @@ -177,14 +177,14 @@ Copyright (c) 2014 The Apache Software Foundation of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS - + This distribution contains third party resources. Within the console-proxy/js directory licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - Copyright (c) 2009, John Resig - + Copyright (c) 2009, John Resig + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including @@ -192,10 +192,10 @@ Within the console-proxy/js directory distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND @@ -203,43 +203,43 @@ Within the console-proxy/js directory LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - from John Resig - jquery.js + + from John Resig + jquery.js Within the systemvm/debian/etc directory placed in the public domain - by Adiscon GmbH http://www.adiscon.com/ - rsyslog.conf - by Simon Kelley - dnsmasq.conf - vpcdnsmasq.conf + by Adiscon GmbH http://www.adiscon.com/ + rsyslog.conf + by Simon Kelley + dnsmasq.conf + vpcdnsmasq.conf Within the systemvm/debian/etc/apache2 directory licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) Copyright (c) 2012 The Apache Software Foundation - from The Apache Software Foundation http://www.apache.org/ - httpd.conf + from The Apache Software Foundation http://www.apache.org/ + httpd.conf vhost.template Within the systemvm/debian/etc/ssh/ directory licensed under the BSD (2-clause) http://www.opensource.org/licenses/BSD-2-Clause (as follows) - - + + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - + Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - + Neither the name of the author nor the names of contributors may be used to endorse or promote products derived from this software without specific prior written permission. - + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -250,55 +250,55 @@ Within the systemvm/debian/etc/ssh/ directory ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - from OpenSSH Project http://www.openssh.org/ - sshd_config + + from OpenSSH Project http://www.openssh.org/ + sshd_config Within the systemvm/debian/root/redundant_router directory placed in the public domain - by The netfilter.org project http://www.netfilter.org/ - conntrackd.conf.templ + by The netfilter.org project http://www.netfilter.org/ + conntrackd.conf.templ Within the scripts/storage/secondary directory licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) Copyright (c) 2010-2011 OpenStack, LLC. - from OpenStack, LLC http://www.openstack.org - swift + from OpenStack, LLC http://www.openstack.org + swift Within the scripts/vm/hypervisor/xenserver directory licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) Copyright (c) 2010-2011 OpenStack, LLC. - from OpenStack, LLC http://www.openstack.org - swift + from OpenStack, LLC http://www.openstack.org + swift Within the ui/lib directory placed in the public domain - by Eric Meyer http://meyerweb.com/eric/ + by Eric Meyer http://meyerweb.com/eric/ reset.css from http://meyerweb.com/eric/tools/css/reset/ licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) Copyright (c) 2006 Google Inc. - from Google Inc. http://google.com + from Google Inc. http://google.com excanvas.js from http://code.google.com/p/explorercanvas/ licensed under the BSD (2-clause) http://www.opensource.org/licenses/BSD-2-Clause (as follows) Copyright (c) 2008 George McGinley Smith - All rights reserved. - + All rights reserved. + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - + Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - + Neither the name of the author nor the names of contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
- + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -309,13 +309,13 @@ Within the ui/lib directory ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - from George McGinley Smith - jquery.easing.js + + from George McGinley Smith + jquery.easing.js licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - - + + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including @@ -323,10 +323,10 @@ Within the ui/lib directory distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND @@ -334,14 +334,14 @@ Within the ui/lib directory LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - from The Dojo Foundation http://dojofoundation.org/ + + from The Dojo Foundation http://dojofoundation.org/ require.js from http://github.com/jrburke/requirejs licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - Copyright (c) 2011, John Resig - + Copyright (c) 2011, John Resig + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including @@ -349,10 +349,10 @@ Within the ui/lib directory distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND @@ -360,14 +360,14 @@ Within the ui/lib directory LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - from John Resig - jquery.js + + from John Resig + jquery.js licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) Copyright (c) 2014 Jörn Zaefferer - + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including @@ -375,10 +375,10 @@ Within the ui/lib directory distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND @@ -386,9 +386,9 @@ Within the ui/lib directory LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - from Jorn Zaefferer - jquery.validate.js + + from Jorn Zaefferer + jquery.validate.js licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) @@ -418,8 +418,8 @@ Within the ui/lib directory licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - Copyright (c) 2010, Sebastian Tschan - + Copyright (c) 2010, Sebastian Tschan + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including @@ -427,10 +427,10 @@ Within the ui/lib directory distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND @@ -438,14 +438,14 @@ Within the ui/lib directory LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - from Sebastian Tschan https://blueimp.net - jquery.md5.js + + from Sebastian Tschan https://blueimp.net + jquery.md5.js licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - Copyright (c) 2006 Klaus Hartl (stilbuero.de) - + Copyright (c) 2006 Klaus Hartl (stilbuero.de) + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including @@ -453,10 +453,10 @@ Within the ui/lib directory distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
- + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND @@ -464,15 +464,15 @@ Within the ui/lib directory LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - from Klaus Hartl http://stilbuero.de - jquery.cookies.js + + from Klaus Hartl http://stilbuero.de + jquery.cookies.js Within the ui/lib/flot directory licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - Released under the MIT license by IOLA, December 2007. - + Released under the MIT license by IOLA, December 2007. + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including @@ -480,10 +480,10 @@ Within the ui/lib/flot directory distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND @@ -491,24 +491,24 @@ Within the ui/lib/flot directory LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - from IOLA http://www.iola.dk/ - jquery.flot.crosshair.js - jquery.flot.fillbetween.js - jquery.flot.image.js - jquery.flot.js - jquery.flot.navigate.js - jquery.flot.resize.js - jquery.flot.selection.js - jquery.flot.stack.js - jquery.flot.symbol.js - jquery.flot.threshold.js + + from IOLA http://www.iola.dk/ + jquery.flot.crosshair.js + jquery.flot.fillbetween.js + jquery.flot.image.js + jquery.flot.js + jquery.flot.navigate.js + jquery.flot.resize.js + jquery.flot.selection.js + jquery.flot.stack.js + jquery.flot.symbol.js + jquery.flot.threshold.js licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) Created by Brian Medendorp, June 2009 - Updated November 2009 with contributions from: btburnett3, Anthony Aragues and Xavi Ivars - + Updated November 2009 with contributions from: btburnett3, Anthony Aragues and Xavi Ivars + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including @@ -516,10 +516,10 @@ Within the ui/lib/flot directory distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
- + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND @@ -527,13 +527,13 @@ Within the ui/lib/flot directory LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - from Brian Medendorp - jquery.pie.js + + from Brian Medendorp + jquery.pie.js licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - - + + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including @@ -541,10 +541,10 @@ Within the ui/lib/flot directory distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND @@ -552,14 +552,14 @@ Within the ui/lib/flot directory LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - from Ole Laursen - jquery.colorhelpers.js + + from Ole Laursen + jquery.colorhelpers.js Within the ui/lib/jquery-ui directory licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - - + + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including @@ -567,10 +567,10 @@ Within the ui/lib/jquery-ui directory distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND @@ -578,17 +578,17 @@ Within the ui/lib/jquery-ui directory LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- - from jQuery UI Developers http://jqueryui.com/about - css/jquery-ui.css - index.html - js/jquery-ui.js + + from jQuery UI Developers http://jqueryui.com/about + css/jquery-ui.css + index.html + js/jquery-ui.js Within the ui/lib/qunit directory licensed under the MIT License http://www.opensource.org/licenses/mit-license.php (as follows) - Copyright (c) 2012 John Resig, Jörn Zaefferer - + Copyright (c) 2012 John Resig, Jörn Zaefferer + Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including @@ -596,10 +596,10 @@ Within the ui/lib/qunit directory distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND @@ -607,20 +607,20 @@ Within the ui/lib/qunit directory LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - from Jorn Zaefferer + + from Jorn Zaefferer qunit.css from http://docs.jquery.com/QUnit qunit.js from http://docs.jquery.com/QUnit Within the utils/src/main/java/com/cloud/utils/db directory licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) Copyright (c) 2004 Clinton Begin - from Clinton Begin http://code.google.com/p/mybatis/ + from Clinton Begin http://code.google.com/p/mybatis/ ScriptRunner.java from http://code.google.com/p/mybatis/ Within the utils/src/main/java/org/apache/commons/httpclient/contrib/ssl directory licensed under the Apache License, Version 2 http://www.apache.org/licenses/LICENSE-2.0.txt (as above) Copyright (c) 2007 The Apache Software Foundation - from The Apache Software Foundation http://www.apache.org/ - EasySSLProtocolSocketFactory.java - EasyX509TrustManager.java + from The Apache Software Foundation http://www.apache.org/ + EasySSLProtocolSocketFactory.java + EasyX509TrustManager.java diff --git a/NOTICE b/NOTICE index b19e4a428530..8666be264b51 100644 --- a/NOTICE +++ b/NOTICE @@ -1,62 +1,62 @@ Apache CloudStack Copyright 2014 The Apache Software Foundation - + This product includes software developed at The Apache Software Foundation (http://www.apache.org/). - - + + This distribution contains third party resources requiring the following notices: - - For + + For jquery.js - + jQuery JavaScript Library v1.3.2 http://jquery.com/ - + Copyright (c) 2009 John Resig Dual licensed under the MIT and GPL licenses. http://docs.jquery.com/License - + Date: 2009-02-19 17:34:21 -0500 (Thu, 19 Feb 2009) Revision: 6246 - - For + + For jquery.js - + jQuery JavaScript Library v1.6.4 http://jquery.com/ - + Copyright 2011, John Resig Dual licensed under the MIT or GPL Version 2 licenses. http://jquery.org/license - + Includes Sizzle.js http://sizzlejs.com/ Copyright 2011, The Dojo Foundation Released under the MIT, BSD, and GPL Licenses. 
- + Date: Mon Sep 12 18:54:48 2011 -0400 - - For + + For jquery.md5.js - + jQuery MD5 Plugin 1.2.1 https://github.com/blueimp/jQuery-MD5 - + Copyright 2010, Sebastian Tschan https://blueimp.net - + Licensed under the MIT license: http://creativecommons.org/licenses/MIT/ - + Based on A JavaScript implementation of the RSA Data Security, Inc. MD5 Message Digest Algorithm, as defined in RFC 1321. @@ -65,15 +65,15 @@ Copyright 2014 The Apache Software Foundation Distributed under the BSD License See http://pajhome.org.uk/crypt/md5 for more info. - - For + + For jquery.colorhelpers.js - + Plugin for jQuery for working with colors. - + Version 1.1. - + Inspiration from jQuery color animation plugin by John Resig. - + Released under the MIT license by Ole Laursen, October 2009. diff --git a/PRE-COMMIT.md b/PRE_COMMIT.md similarity index 97% rename from PRE-COMMIT.md rename to PRE_COMMIT.md index 9b76929d4223..62dc296c99e4 100644 --- a/PRE-COMMIT.md +++ b/PRE_COMMIT.md @@ -20,7 +20,7 @@ # pre-commit We run [pre-commit](https://pre-commit.com/) with -[GitHub Actions](https://github.com/apache/cloudstack/blob/main/.github/workflows/linter.yml) so installation on your +[GitHub Actions](https://github.com/apache/cloudstack/blob/main/.github/workflows/pre-commit.yml) so installation on your local machine is currently optional. The `pre-commit` [configuration file](https://github.com/apache/cloudstack/blob/main/.pre-commit-config.yaml) diff --git a/README.md b/README.md index a24131958a2c..a5aacb49f6b5 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,24 @@ [![Apache CloudStack](tools/logo/apache_cloudstack.png)](https://cloudstack.apache.org/) + + + +- [Who Uses CloudStack?](#who-uses-cloudstack) +- [Demo](#demo) +- [Getting Started](#getting-started) +- [Getting Source Repository](#getting-source-repository) +- [Documentation](#documentation) +- [News and Events](#news-and-events) +- [Getting Involved and Contributing](#getting-involved-and-contributing) +- [Reporting Security Vulnerabilities](#reporting-security-vulnerabilities) +- [License](#license) +- [Notice of Cryptographic Software](#notice-of-cryptographic-software) +- [Star History](#star-history) +- [Contributors](#contributors) + + + Apache CloudStack is open source software designed to deploy and manage large networks of virtual machines, as a highly available, highly scalable Infrastructure as a Service (IaaS) cloud computing platform. CloudStack is used diff --git a/agent/pom.xml b/agent/pom.xml index 4fa30e4f78e2..5ab6cfe17c13 100644 --- a/agent/pom.xml +++ b/agent/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.22.1.0-SNAPSHOT + 4.23.0.0-SNAPSHOT diff --git a/agent/src/main/java/com/cloud/agent/Agent.java b/agent/src/main/java/com/cloud/agent/Agent.java index 52b1fe392e8d..275fd41edc34 100644 --- a/agent/src/main/java/com/cloud/agent/Agent.java +++ b/agent/src/main/java/com/cloud/agent/Agent.java @@ -1322,7 +1322,6 @@ public void doTask(final Task task) throws TaskExecutionException { processResponse((Response)request, task.getLink()); } else { //put the requests from mgt server into another thread pool, as the request may take a longer time to finish. 
Don't block the NIO main thread pool - //processRequest(request, task.getLink()); requestHandler.submit(new AgentRequestHandler(getType(), getLink(), request)); } } catch (final ClassNotFoundException e) { diff --git a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java index 1561f0d5cfbc..3364f9708cf5 100644 --- a/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java +++ b/agent/src/main/java/com/cloud/agent/properties/AgentProperties.java @@ -117,7 +117,7 @@ public class AgentProperties{ /** * Local storage path.
- * This property allows multiple values to be entered in a single String. The differente values must be separated by commas.
+ * This property allows multiple values to be entered in a single String. The different values must be separated by commas.
* Data type: String.
* Default value: /var/lib/libvirt/images/ */ @@ -134,7 +134,7 @@ public class AgentProperties{ /** * MANDATORY: The UUID for the local storage pool.
- * This property allows multiple values to be entered in a single String. The differente values must be separated by commas.
+ * This property allows multiple values to be entered in a single String. The different values must be separated by commas.
* Data type: String.
* Default value: null */ diff --git a/api/pom.xml b/api/pom.xml index 405365451c6f..c80c35593451 100644 --- a/api/pom.xml +++ b/api/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.22.1.0-SNAPSHOT + 4.23.0.0-SNAPSHOT diff --git a/api/src/main/java/com/cloud/agent/api/storage/OVFHelper.java b/api/src/main/java/com/cloud/agent/api/storage/OVFHelper.java index ccb5362c8537..23167c5c53b0 100644 --- a/api/src/main/java/com/cloud/agent/api/storage/OVFHelper.java +++ b/api/src/main/java/com/cloud/agent/api/storage/OVFHelper.java @@ -119,8 +119,7 @@ protected OVFPropertyTO createOVFPropertyFromNode(Node node, int index, String c boolean password = StringUtils.isNotBlank(passStr) && passStr.equalsIgnoreCase("true"); String label = ovfParser.getChildNodeValue(node, "Label"); String description = ovfParser.getChildNodeValue(node, "Description"); - logger.debug("Creating OVF property index " + index + (category == null ? "" : " for category " + category) - + " with key = " + key); + logger.debug("Creating OVF property index {} {} with key = {}", index, (category == null ? "" : " for category " + category), key); return new OVFPropertyTO(key, type, value, qualifiers, userConfigurable, label, description, password, index, category); } @@ -152,7 +151,7 @@ public List getConfigurableOVFPropertiesFromDocument(Document doc if (child.getNodeName().equalsIgnoreCase("Category") || child.getNodeName().endsWith(":Category")) { lastCategoryFound = child.getTextContent(); - logger.info("Category found " + lastCategoryFound); + logger.info("Category found {}", lastCategoryFound); } else if (child.getNodeName().equalsIgnoreCase("Property") || child.getNodeName().endsWith(":Property")) { OVFPropertyTO prop = createOVFPropertyFromNode(child, propertyIndex, lastCategoryFound); @@ -250,13 +249,13 @@ private List matchHardwareItemsToDiskAndFilesInformation(List extractDisksFromOvfDocumentTree(Document doc) { od._controller = getControllerType(items, od._diskId); vd.add(od); } - if (logger.isTraceEnabled()) { - logger.trace(String.format("Found %d disk definitions", vd.size())); - } + logger.trace("Found {} disk definitions", vd.size()); return vd; } @@ -366,9 +363,7 @@ protected List extractFilesFromOvfDocumentTree(File ovfFile, Document d vf.add(of); } } - if (logger.isTraceEnabled()) { - logger.trace(String.format("Found %d file definitions in %s", vf.size(), ovfFile.getPath())); - } + logger.trace("Found {} file definitions in {}", vf.size(), ovfFile.getPath()); return vf; } @@ -506,7 +501,7 @@ private void writeDocumentToFile(String newOvfFilePath, Document doc) { outfile.write(writer.toString()); outfile.close(); } catch (IOException | TransformerException e) { - logger.info("Unexpected exception caught while rewriting OVF:" + e.getMessage(), e); + logger.info("Unexpected exception caught while rewriting OVF: {}", e.getMessage(), e); throw new CloudRuntimeException(e); } } @@ -522,9 +517,7 @@ OVFFile getFileDefinitionFromDiskDefinition(String fileRef, List files) public List getNetPrerequisitesFromDocument(Document doc) throws InternalErrorException { if (doc == null) { - if (logger.isTraceEnabled()) { - logger.trace("No document to parse; returning no prerequisite Networks"); - } + logger.trace("No document to parse; returning no prerequisite networks"); return Collections.emptyList(); } @@ -540,9 +533,7 @@ public List getNetPrerequisitesFromDocument(Document doc) throws I private void matchNicsToNets(Map nets, Node systemElement) { final DocumentTraversal traversal = (DocumentTraversal) 
systemElement; final NodeIterator iterator = traversal.createNodeIterator(systemElement, NodeFilter.SHOW_ELEMENT, null, true); - if (logger.isTraceEnabled()) { - logger.trace(String.format("Starting out with %d network-prerequisites, parsing hardware",nets.size())); - } + logger.trace("Starting out with {} network-prerequisites, parsing hardware", nets.size()); int nicCount = 0; for (Node n = iterator.nextNode(); n != null; n = iterator.nextNode()) { final Element e = (Element) n; @@ -550,9 +541,7 @@ private void matchNicsToNets(Map nets, Node systemElement) nicCount++; String name = e.getTextContent(); // should be in our nets if(nets.get(name) == null) { - if(logger.isInfoEnabled()) { - logger.info(String.format("Found a NIC definition without a Network definition by name %s, adding it to the list.", name)); - } + logger.info("Found a NIC definition without a Network definition by name {}, adding it to the list.", name); nets.put(name, new OVFNetworkTO()); } OVFNetworkTO thisNet = nets.get(name); @@ -561,9 +550,7 @@ private void matchNicsToNets(Map nets, Node systemElement) } } } - if (logger.isTraceEnabled()) { - logger.trace(String.format("Ending up with %d network-prerequisites, parsed %d NICs", nets.size(), nicCount)); - } + logger.trace("Ending up with {} network-prerequisites, parsed {} nics", nets.size(), nicCount); } /** @@ -585,7 +572,7 @@ private void fillNicPrerequisites(OVFNetworkTO nic, Node parentNode) { int addressOnParent = Integer.parseInt(addressOnParentStr); nic.setAddressOnParent(addressOnParent); } catch (NumberFormatException e) { - logger.warn("Encountered element of type \"AddressOnParent\", that could not be parse to an integer number: " + addressOnParentStr); + logger.warn("Encountered element of type \"AddressOnParent\", that could not be parse to an integer number: {}", addressOnParentStr); } boolean automaticAllocation = StringUtils.isNotBlank(automaticAllocationStr) && Boolean.parseBoolean(automaticAllocationStr); @@ -597,7 +584,7 @@ private void fillNicPrerequisites(OVFNetworkTO nic, Node parentNode) { int instanceId = Integer.parseInt(instanceIdStr); nic.setInstanceID(instanceId); } catch (NumberFormatException e) { - logger.warn("Encountered element of type \"InstanceID\", that could not be parse to an integer number: " + instanceIdStr); + logger.warn("Encountered element of type \"InstanceID\", that could not be parse to an integer number: {}", instanceIdStr); } nic.setResourceSubType(resourceSubType); @@ -630,9 +617,7 @@ private Map getNetworksFromDocumentTree(Document doc) { nets.put(networkName,network); } - if (logger.isTraceEnabled()) { - logger.trace(String.format("Found %d Networks in Template", nets.size())); - } + logger.trace("Found {} Networks in Template", nets.size()); return nets; } @@ -771,7 +756,7 @@ private Long getLongValueFromString(String value) { try { return Long.parseLong(value); } catch (NumberFormatException e) { - logger.debug("Could not parse the value: " + value + ", ignoring it"); + logger.debug("Could not parse the value: {}, ignoring it", value); } } return null; @@ -782,7 +767,7 @@ private Integer getIntValueFromString(String value) { try { return Integer.parseInt(value); } catch (NumberFormatException e) { - logger.debug("Could not parse the value: " + value + ", ignoring it"); + logger.debug("Could not parse the value: {}, ignoring it", value); } } return null; @@ -820,7 +805,7 @@ public List getEulaSectionsFromDocument(Document doc) { try { compressedLicense = compressOVFEula(eulaLicense); } catch (IOException e) { - 
logger.error("Could not compress the license for info " + eulaInfo); + logger.error("Could not compress the license for info {}", eulaInfo); continue; } OVFEulaSectionTO eula = new OVFEulaSectionTO(eulaInfo, compressedLicense, eulaIndex); diff --git a/api/src/main/java/com/cloud/agent/api/storage/OVFParser.java b/api/src/main/java/com/cloud/agent/api/storage/OVFParser.java index 38f478d63cf8..316ab4ea87b9 100644 --- a/api/src/main/java/com/cloud/agent/api/storage/OVFParser.java +++ b/api/src/main/java/com/cloud/agent/api/storage/OVFParser.java @@ -54,7 +54,7 @@ public OVFParser() { documentBuilderFactory.setNamespaceAware(true); documentBuilder = documentBuilderFactory.newDocumentBuilder(); } catch (ParserConfigurationException e) { - logger.error("Cannot start the OVF parser: " + e.getMessage(), e); + logger.error("Cannot start the OVF parser: {}", e.getMessage(), e); } } @@ -70,7 +70,7 @@ public Document parseOVFFile(String ovfFilePath) { try { return documentBuilder.parse(new File(ovfFilePath)); } catch (SAXException | IOException e) { - logger.error("Error parsing " + ovfFilePath + " " + e.getMessage(), e); + logger.error("Error parsing {} {}", ovfFilePath, e.getMessage(), e); return null; } } diff --git a/api/src/main/java/com/cloud/agent/api/to/VirtualMachineMetadataTO.java b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineMetadataTO.java new file mode 100644 index 000000000000..5b22afdedd53 --- /dev/null +++ b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineMetadataTO.java @@ -0,0 +1,182 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package com.cloud.agent.api.to; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class VirtualMachineMetadataTO { + // VM details + private final String name; + private final String internalName; + private final String displayName; + private final String instanceUuid; + private final Integer cpuCores; + private final Integer memory; + private final Long created; + private final Long started; + + // Owner details + private final String ownerDomainUuid; + private final String ownerDomainName; + private final String ownerAccountUuid; + private final String ownerAccountName; + private final String ownerProjectUuid; + private final String ownerProjectName; + + // Host and service offering + private final String serviceOfferingName; + private final List<String> serviceOfferingHostTags; + + // zone, pod, and cluster details + private final String zoneName; + private final String zoneUuid; + private final String podName; + private final String podUuid; + private final String clusterName; + private final String clusterUuid; + + // resource tags + private final Map<String, String> resourceTags; + + public VirtualMachineMetadataTO( + String name, String internalName, String displayName, String instanceUuid, Integer cpuCores, Integer memory, Long created, Long started, + String ownerDomainUuid, String ownerDomainName, String ownerAccountUuid, String ownerAccountName, String ownerProjectUuid, String ownerProjectName, + String serviceOfferingName, List<String> serviceOfferingHostTags, + String zoneName, String zoneUuid, String podName, String podUuid, String clusterName, String clusterUuid, Map<String, String> resourceTags) { + /* + * A failure while gathering metadata shall not be a fatal error; the VM can still be started. + * Thus, unknown fields simply get an explicit "unknown" value so that it can be fixed in case + * there are bugs on some execution paths. + * */ + + this.name = (name != null) ? name : "unknown"; + this.internalName = (internalName != null) ? internalName : "unknown"; + this.displayName = (displayName != null) ? displayName : "unknown"; + this.instanceUuid = (instanceUuid != null) ? instanceUuid : "unknown"; + this.cpuCores = (cpuCores != null) ? cpuCores : -1; + this.memory = (memory != null) ? memory : -1; + this.created = (created != null) ? created : 0; + this.started = (started != null) ? started : 0; + this.ownerDomainUuid = (ownerDomainUuid != null) ? ownerDomainUuid : "unknown"; + this.ownerDomainName = (ownerDomainName != null) ? ownerDomainName : "unknown"; + this.ownerAccountUuid = (ownerAccountUuid != null) ? ownerAccountUuid : "unknown"; + this.ownerAccountName = (ownerAccountName != null) ? ownerAccountName : "unknown"; + this.ownerProjectUuid = (ownerProjectUuid != null) ? ownerProjectUuid : "unknown"; + this.ownerProjectName = (ownerProjectName != null) ? ownerProjectName : "unknown"; + this.serviceOfferingName = (serviceOfferingName != null) ? serviceOfferingName : "unknown"; + this.serviceOfferingHostTags = (serviceOfferingHostTags != null) ? serviceOfferingHostTags : new ArrayList<>(); + this.zoneName = (zoneName != null) ? zoneName : "unknown"; + this.zoneUuid = (zoneUuid != null) ? zoneUuid : "unknown"; + this.podName = (podName != null) ? podName : "unknown"; + this.podUuid = (podUuid != null) ? podUuid : "unknown"; + this.clusterName = (clusterName != null) ? clusterName : "unknown"; + this.clusterUuid = (clusterUuid != null) ? clusterUuid : "unknown"; + + this.resourceTags = (resourceTags != null) ?
resourceTags : new HashMap<>(); + } + + public String getName() { + return name; + } + + public String getInternalName() { + return internalName; + } + + public String getDisplayName() { + return displayName; + } + + public String getInstanceUuid() { + return instanceUuid; + } + + public Integer getCpuCores() { + return cpuCores; + } + + public Integer getMemory() { + return memory; + } + + public Long getCreated() { return created; } + + public Long getStarted() { + return started; + } + + public String getOwnerDomainUuid() { + return ownerDomainUuid; + } + + public String getOwnerDomainName() { + return ownerDomainName; + } + + public String getOwnerAccountUuid() { + return ownerAccountUuid; + } + + public String getOwnerAccountName() { + return ownerAccountName; + } + + public String getOwnerProjectUuid() { + return ownerProjectUuid; + } + + public String getOwnerProjectName() { + return ownerProjectName; + } + + public String getserviceOfferingName() { + return serviceOfferingName; + } + + public List getserviceOfferingHostTags() { + return serviceOfferingHostTags; + } + + public String getZoneName() { + return zoneName; + } + + public String getZoneUuid() { + return zoneUuid; + } + + public String getPodName() { + return podName; + } + + public String getPodUuid() { + return podUuid; + } + + public String getClusterName() { + return clusterName; + } + + public String getClusterUuid() { + return clusterUuid; + } + + public Map getResourceTags() { return resourceTags; } +} diff --git a/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java index cffb98740805..e26cc1e9f029 100644 --- a/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java +++ b/api/src/main/java/com/cloud/agent/api/to/VirtualMachineTO.java @@ -89,6 +89,7 @@ public class VirtualMachineTO { private DeployAsIsInfoTO deployAsIsInfo; private String metadataManufacturer; private String metadataProductName; + private VirtualMachineMetadataTO metadata; public VirtualMachineTO(long id, String instanceName, VirtualMachine.Type type, int cpus, Integer speed, long minRam, long maxRam, BootloaderType bootloader, String os, boolean enableHA, boolean limitCpuUse, String vncPassword) { @@ -494,6 +495,14 @@ public void setMetadataProductName(String metadataProductName) { this.metadataProductName = metadataProductName; } + public VirtualMachineMetadataTO getMetadata() { + return metadata; + } + + public void setMetadata(VirtualMachineMetadataTO metadata) { + this.metadata = metadata; + } + @Override public String toString() { return String.format("VM {id: \"%s\", name: \"%s\", uuid: \"%s\", type: \"%s\"}", id, name, uuid, type); diff --git a/api/src/main/java/com/cloud/cpu/CPU.java b/api/src/main/java/com/cloud/cpu/CPU.java index 3016e542db65..11b38b73da53 100644 --- a/api/src/main/java/com/cloud/cpu/CPU.java +++ b/api/src/main/java/com/cloud/cpu/CPU.java @@ -22,7 +22,8 @@ public class CPU { public enum CPUArch { x86("i686", 32), amd64("x86_64", 64), - arm64("aarch64", 64); + arm64("aarch64", 64), + s390x("s390x", 64); private final String type; private final int bits; diff --git a/api/src/main/java/com/cloud/deploy/DeploymentClusterPlanner.java b/api/src/main/java/com/cloud/deploy/DeploymentClusterPlanner.java index d127e4bdd660..9471c3d5c84c 100644 --- a/api/src/main/java/com/cloud/deploy/DeploymentClusterPlanner.java +++ b/api/src/main/java/com/cloud/deploy/DeploymentClusterPlanner.java @@ -62,11 +62,11 @@ public interface DeploymentClusterPlanner extends 
DeploymentPlanner { "vm.allocation.algorithm", "Advanced", "random", - "Order in which hosts within a cluster will be considered for VM allocation. The value can be 'random', 'firstfit', 'userdispersing', 'userconcentratedpod_random', 'userconcentratedpod_firstfit', or 'firstfitleastconsumed'.", + "Order in which hosts within a cluster will be considered for VM allocation. The value can be 'random', 'firstfit', 'userdispersing', or 'firstfitleastconsumed'.", true, ConfigKey.Scope.Global, null, null, null, null, null, ConfigKey.Kind.Select, - "random,firstfit,userdispersing,userconcentratedpod_random,userconcentratedpod_firstfit,firstfitleastconsumed"); + "random,firstfit,userdispersing,firstfitleastconsumed"); /** * This is called to determine list of possible clusters where a virtual diff --git a/api/src/main/java/com/cloud/deploy/DeploymentPlanner.java b/api/src/main/java/com/cloud/deploy/DeploymentPlanner.java index 354f9cfaac53..8f7e773070f0 100644 --- a/api/src/main/java/com/cloud/deploy/DeploymentPlanner.java +++ b/api/src/main/java/com/cloud/deploy/DeploymentPlanner.java @@ -70,7 +70,7 @@ public interface DeploymentPlanner extends Adapter { boolean canHandle(VirtualMachineProfile vm, DeploymentPlan plan, ExcludeList avoid); public enum AllocationAlgorithm { - random, firstfit, userdispersing, userconcentratedpod_random, userconcentratedpod_firstfit; + random, firstfit, userdispersing; } public enum PlannerResourceUsage { diff --git a/api/src/main/java/com/cloud/event/EventTypes.java b/api/src/main/java/com/cloud/event/EventTypes.java index 38e601c790a7..d2989a8ffdc9 100644 --- a/api/src/main/java/com/cloud/event/EventTypes.java +++ b/api/src/main/java/com/cloud/event/EventTypes.java @@ -503,6 +503,7 @@ public class EventTypes { public static final String EVENT_S2S_VPN_CUSTOMER_GATEWAY_CREATE = "VPN.S2S.CUSTOMER.GATEWAY.CREATE"; public static final String EVENT_S2S_VPN_CUSTOMER_GATEWAY_DELETE = "VPN.S2S.CUSTOMER.GATEWAY.DELETE"; public static final String EVENT_S2S_VPN_CUSTOMER_GATEWAY_UPDATE = "VPN.S2S.CUSTOMER.GATEWAY.UPDATE"; + public static final String EVENT_S2S_VPN_GATEWAY_OBSOLETE_PARAMS = "VPN.S2S.GATEWAY.OBSOLETE.PARAMS"; public static final String EVENT_S2S_VPN_CONNECTION_CREATE = "VPN.S2S.CONNECTION.CREATE"; public static final String EVENT_S2S_VPN_CONNECTION_DELETE = "VPN.S2S.CONNECTION.DELETE"; public static final String EVENT_S2S_VPN_CONNECTION_RESET = "VPN.S2S.CONNECTION.RESET"; @@ -1151,6 +1152,7 @@ public class EventTypes { entityEventDetails.put(EVENT_S2S_VPN_CUSTOMER_GATEWAY_CREATE, Site2SiteCustomerGateway.class); entityEventDetails.put(EVENT_S2S_VPN_CUSTOMER_GATEWAY_DELETE, Site2SiteCustomerGateway.class); entityEventDetails.put(EVENT_S2S_VPN_CUSTOMER_GATEWAY_UPDATE, Site2SiteCustomerGateway.class); + entityEventDetails.put(EVENT_S2S_VPN_GATEWAY_OBSOLETE_PARAMS, Site2SiteCustomerGateway.class); entityEventDetails.put(EVENT_S2S_VPN_CONNECTION_CREATE, Site2SiteVpnConnection.class); entityEventDetails.put(EVENT_S2S_VPN_CONNECTION_DELETE, Site2SiteVpnConnection.class); entityEventDetails.put(EVENT_S2S_VPN_CONNECTION_RESET, Site2SiteVpnConnection.class); diff --git a/api/src/main/java/com/cloud/host/HostStats.java b/api/src/main/java/com/cloud/host/HostStats.java index d14794401fa3..0e72b5f2d9d0 100644 --- a/api/src/main/java/com/cloud/host/HostStats.java +++ b/api/src/main/java/com/cloud/host/HostStats.java @@ -36,5 +36,4 @@ public interface HostStats { public HostStats getHostStats(); public double getLoadAverage(); - // public double getXapiMemoryUsageKBs(); } diff 
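// Illustrative sketch (not part of this change): how the new VirtualMachineMetadataTO introduced above
// might be populated and attached to a VirtualMachineTO through the new setMetadata() accessor. All
// concrete values and the helper class/method below are assumptions for the example; only the
// constructor's null-to-"unknown" defaulting and VirtualMachineTO.setMetadata() come from the diff.
import java.util.Collections;
import java.util.Map;

import com.cloud.agent.api.to.VirtualMachineMetadataTO;
import com.cloud.agent.api.to.VirtualMachineTO;

public class VirtualMachineMetadataExample {
    static void attachMetadata(VirtualMachineTO vmTO, Map<String, String> resourceTags) {
        VirtualMachineMetadataTO metadata = new VirtualMachineMetadataTO(
                "web-01", "i-2-10-VM", "web-01", "c0ffee00-0000-0000-0000-000000000001",
                2, 2048, System.currentTimeMillis(), null,      // 'started' unknown here -> defaults to 0
                null, null, null, null, null, null,              // owner fields left null -> "unknown"
                "Medium Instance", Collections.emptyList(),
                "zone1", null, "pod1", null, "cluster1", null,   // missing uuids -> "unknown"
                resourceTags);
        vmTO.setMetadata(metadata);                              // carried to the agent alongside the rest of the TO
    }
}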
--git a/api/src/main/java/com/cloud/hypervisor/Hypervisor.java b/api/src/main/java/com/cloud/hypervisor/Hypervisor.java index 1f8741d3b7b2..13ef7a6871d4 100644 --- a/api/src/main/java/com/cloud/hypervisor/Hypervisor.java +++ b/api/src/main/java/com/cloud/hypervisor/Hypervisor.java @@ -47,7 +47,6 @@ public enum Functionality { public static final HypervisorType XenServer = new HypervisorType("XenServer", ImageFormat.VHD, EnumSet.of(RootDiskSizeOverride, VmStorageMigration)); public static final HypervisorType KVM = new HypervisorType("KVM", ImageFormat.QCOW2, EnumSet.of(DirectDownloadTemplate, RootDiskSizeOverride, VmStorageMigration)); public static final HypervisorType VMware = new HypervisorType("VMware", ImageFormat.OVA, EnumSet.of(RootDiskSizeOverride, VmStorageMigration, VmStorageMigrationWithSnapshots)); - public static final HypervisorType Hyperv = new HypervisorType("Hyperv"); public static final HypervisorType VirtualBox = new HypervisorType("VirtualBox"); public static final HypervisorType Parralels = new HypervisorType("Parralels"); public static final HypervisorType BareMetal = new HypervisorType("BareMetal"); diff --git a/api/src/main/java/com/cloud/network/PhysicalNetworkTrafficType.java b/api/src/main/java/com/cloud/network/PhysicalNetworkTrafficType.java index 9676badb4e90..d3804cd29daf 100644 --- a/api/src/main/java/com/cloud/network/PhysicalNetworkTrafficType.java +++ b/api/src/main/java/com/cloud/network/PhysicalNetworkTrafficType.java @@ -41,4 +41,6 @@ public interface PhysicalNetworkTrafficType extends InternalIdentity, Identity { String getHypervNetworkLabel(); String getOvm3NetworkLabel(); + + String getVlan(); } diff --git a/api/src/main/java/com/cloud/network/rules/LbStickinessMethod.java b/api/src/main/java/com/cloud/network/rules/LbStickinessMethod.java index 56a0622a52ba..5143611ee828 100644 --- a/api/src/main/java/com/cloud/network/rules/LbStickinessMethod.java +++ b/api/src/main/java/com/cloud/network/rules/LbStickinessMethod.java @@ -108,8 +108,7 @@ public LbStickinessMethod(StickinessMethodType methodType, String description) { } public void addParam(String name, Boolean required, String description, Boolean isFlag) { - /* FIXME : UI is breaking if the capability string length is larger , temporarily description is commented out */ - // LbStickinessMethodParam param = new LbStickinessMethodParam(name, required, description); + /* is this still a valid comment: FIXME : UI is breaking if the capability string length is larger , temporarily description is commented out */ LbStickinessMethodParam param = new LbStickinessMethodParam(name, required, " ", isFlag); _paramList.add(param); return; @@ -133,7 +132,6 @@ public String getDescription() { public void setDescription(String description) { /* FIXME : UI is breaking if the capability string length is larger , temporarily description is commented out */ - //this.description = description; this._description = " "; } } diff --git a/api/src/main/java/com/cloud/server/ResourceTag.java b/api/src/main/java/com/cloud/server/ResourceTag.java index b3026deceff8..32305753f1ae 100644 --- a/api/src/main/java/com/cloud/server/ResourceTag.java +++ b/api/src/main/java/com/cloud/server/ResourceTag.java @@ -16,14 +16,14 @@ // under the License. 
package com.cloud.server; -import org.apache.cloudstack.acl.ControlledEntity; -import org.apache.cloudstack.api.Identity; -import org.apache.cloudstack.api.InternalIdentity; - import java.util.HashMap; import java.util.Locale; import java.util.Map; +import org.apache.cloudstack.acl.ControlledEntity; +import org.apache.cloudstack.api.Identity; +import org.apache.cloudstack.api.InternalIdentity; + public interface ResourceTag extends ControlledEntity, Identity, InternalIdentity { // FIXME - extract enum to another interface as its used both by resourceTags and resourceMetaData code @@ -70,7 +70,7 @@ public enum ResourceObjectType { GuestOs(false, true), NetworkOffering(false, true), VpcOffering(true, false), - Domain(false, false, true), + Domain(true, false, true), ObjectStore(false, false, true); diff --git a/api/src/main/java/com/cloud/storage/Storage.java b/api/src/main/java/com/cloud/storage/Storage.java index 1ad3731b9eaa..5b3e97698fda 100644 --- a/api/src/main/java/com/cloud/storage/Storage.java +++ b/api/src/main/java/com/cloud/storage/Storage.java @@ -128,7 +128,7 @@ public static enum FileSystem { public static enum TemplateType { ROUTING, // Router template SYSTEM, /* routing, system vm template */ - BUILTIN, /* buildin template */ + BUILTIN, /* builtin template */ PERHOST, /* every host has this template, don't need to install it in secondary storage */ USER, /* User supplied template/iso */ VNF, /* VNFs (virtual network functions) template */ diff --git a/api/src/main/java/com/cloud/user/AccountService.java b/api/src/main/java/com/cloud/user/AccountService.java index 09fe5ffc0590..b92654bfe174 100644 --- a/api/src/main/java/com/cloud/user/AccountService.java +++ b/api/src/main/java/com/cloud/user/AccountService.java @@ -36,6 +36,7 @@ import com.cloud.offering.NetworkOffering; import com.cloud.offering.ServiceOffering; import org.apache.cloudstack.auth.UserTwoFactorAuthenticator; +import org.apache.cloudstack.backup.BackupOffering; public interface AccountService { @@ -115,13 +116,15 @@ User createUser(String userName, String password, String firstName, String lastN void checkAccess(Account account, VpcOffering vof, DataCenter zone) throws PermissionDeniedException; + void checkAccess(Account account, BackupOffering bof) throws PermissionDeniedException; + void checkAccess(User user, ControlledEntity entity); void checkAccess(Account account, AccessType accessType, boolean sameOwner, String apiName, ControlledEntity... 
entities) throws PermissionDeniedException; void validateAccountHasAccessToResource(Account account, AccessType accessType, Object resource); - Long finalyzeAccountId(String accountName, Long domainId, Long projectId, boolean enabledOnly); + Long finalizeAccountId(String accountName, Long domainId, Long projectId, boolean enabledOnly); /** * returns the user account object for a given user id diff --git a/api/src/main/java/org/apache/cloudstack/acl/RoleType.java b/api/src/main/java/org/apache/cloudstack/acl/RoleType.java index c721d52804c6..c33488cd9239 100644 --- a/api/src/main/java/org/apache/cloudstack/acl/RoleType.java +++ b/api/src/main/java/org/apache/cloudstack/acl/RoleType.java @@ -132,10 +132,10 @@ public static Set fromCombinedMask(int combinedMask) { * */ public static Account.Type getAccountTypeByRole(final Role role, final Account.Type defautAccountType) { if (role != null) { - LOGGER.debug(String.format("Role [%s] is not null; therefore, we use its Account type [%s].", role, defautAccountType)); + LOGGER.debug("Role [{}] is not null; therefore, we use its Account type [{}].", role, defautAccountType); return role.getRoleType().getAccountType(); } - LOGGER.debug(String.format("Role is null; therefore, we use the default Account type [%s] value.", defautAccountType)); + LOGGER.debug("Role is null; therefore, we use the default Account type [{}] value.", defautAccountType); return defautAccountType; } } diff --git a/api/src/main/java/org/apache/cloudstack/acl/SecurityChecker.java b/api/src/main/java/org/apache/cloudstack/acl/SecurityChecker.java index 82a8ec5fe932..fa17df7c6ed4 100644 --- a/api/src/main/java/org/apache/cloudstack/acl/SecurityChecker.java +++ b/api/src/main/java/org/apache/cloudstack/acl/SecurityChecker.java @@ -27,6 +27,8 @@ import com.cloud.user.User; import com.cloud.utils.component.Adapter; +import org.apache.cloudstack.backup.BackupOffering; + /** * SecurityChecker checks the ownership and access control to objects within */ @@ -145,4 +147,6 @@ boolean checkAccess(Account caller, AccessType accessType, String action, Contro boolean checkAccess(Account account, NetworkOffering nof, DataCenter zone) throws PermissionDeniedException; boolean checkAccess(Account account, VpcOffering vof, DataCenter zone) throws PermissionDeniedException; + + boolean checkAccess(Account account, BackupOffering bof) throws PermissionDeniedException; } diff --git a/api/src/main/java/org/apache/cloudstack/alert/AlertService.java b/api/src/main/java/org/apache/cloudstack/alert/AlertService.java index d8e471756a02..cc3188feeca5 100644 --- a/api/src/main/java/org/apache/cloudstack/alert/AlertService.java +++ b/api/src/main/java/org/apache/cloudstack/alert/AlertService.java @@ -74,6 +74,7 @@ private AlertType(short type, String name, boolean isDefault) { public static final AlertType ALERT_TYPE_VR_PUBLIC_IFACE_MTU = new AlertType((short)32, "ALERT.VR.PUBLIC.IFACE.MTU", true); public static final AlertType ALERT_TYPE_VR_PRIVATE_IFACE_MTU = new AlertType((short)32, "ALERT.VR.PRIVATE.IFACE.MTU", true); public static final AlertType ALERT_TYPE_EXTENSION_PATH_NOT_READY = new AlertType((short)33, "ALERT.TYPE.EXTENSION.PATH.NOT.READY", true); + public static final AlertType ALERT_TYPE_VPN_GATEWAY_OBSOLETE_PARAMETERS = new AlertType((short)34, "ALERT.S2S.VPN.GATEWAY.OBSOLETE.PARAMETERS", true); public static final AlertType ALERT_TYPE_BACKUP_STORAGE = new AlertType(Capacity.CAPACITY_TYPE_BACKUP_STORAGE, "ALERT.STORAGE.BACKUP", true); public static final AlertType ALERT_TYPE_OBJECT_STORAGE = new 
AlertType(Capacity.CAPACITY_TYPE_OBJECT_STORAGE, "ALERT.STORAGE.OBJECT", true); diff --git a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java index 944b111eb70c..d24d2d9dab95 100644 --- a/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java +++ b/api/src/main/java/org/apache/cloudstack/api/ApiConstants.java @@ -375,6 +375,7 @@ public class ApiConstants { public static final String MAC_ADDRESS = "macaddress"; public static final String MAC_ADDRESSES = "macaddresses"; public static final String MANUAL_UPGRADE = "manualupgrade"; + public static final String MATCH_TYPE = "matchtype"; public static final String MAX = "max"; public static final String MAX_SNAPS = "maxsnaps"; public static final String MAX_BACKUPS = "maxbackups"; @@ -1069,7 +1070,6 @@ public class ApiConstants { public static final String AUTOSCALE_VMGROUP_NAME = "autoscalevmgroupname"; public static final String BAREMETAL_DISCOVER_NAME = "baremetaldiscovername"; public static final String BAREMETAL_RCT_URL = "baremetalrcturl"; - public static final String UCS_DN = "ucsdn"; public static final String GSLB_PROVIDER = "gslbprovider"; public static final String EXCLUSIVE_GSLB_PROVIDER = "isexclusivegslbprovider"; public static final String GSLB_PROVIDER_PUBLIC_IP = "gslbproviderpublicip"; @@ -1086,10 +1086,6 @@ public class ApiConstants { public static final String GUEST_VM_CIDR = "guestvmcidr"; public static final String NETWORK_CIDR = "networkcidr"; public static final String RESERVED_IP_RANGE = "reservediprange"; - public static final String UCS_MANAGER_ID = "ucsmanagerid"; - public static final String UCS_PROFILE_DN = "profiledn"; - public static final String UCS_BLADE_DN = "bladedn"; - public static final String UCS_BLADE_ID = "bladeid"; public static final String VM_GUEST_IP = "vmguestip"; public static final String HEALTHCHECK_FAILED = "healthchecksfailed"; public static final String HEALTHCHECK_RESPONSE_TIMEOUT = "responsetimeout"; @@ -1364,6 +1360,10 @@ public class ApiConstants { public static final String RECURSIVE_DOMAINS = "recursivedomains"; + public static final String VPN_CUSTOMER_GATEWAY_PARAMETERS = "vpncustomergatewayparameters"; + public static final String OBSOLETE_PARAMETERS = "obsoleteparameters"; + public static final String EXCLUDED_PARAMETERS = "excludedparameters"; + /** * This enum specifies IO Drivers, each option controls specific policies on I/O. * Qemu guests support "threads" and "native" options Since 0.8.8 ; "io_uring" is supported Since 6.3.0 (QEMU 5.0). 
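// A minimal, hypothetical sketch of the version gate implied by the IO driver javadoc above: "threads"
// and "native" are broadly available, while "io_uring" needs libvirt 6.3.0 / QEMU 5.0. The class and
// constant names are illustrative only; the encodings follow libvirt's major*1000000 + minor*1000 +
// release numbering scheme.
public final class IoDriverSupportSketch {
    private static final long MIN_LIBVIRT_FOR_IO_URING = 6003000L; // 6.3.0
    private static final long MIN_QEMU_FOR_IO_URING = 5000000L;    // 5.0.0

    private IoDriverSupportSketch() {
    }

    public static boolean canUseIoUring(long libvirtVersion, long qemuVersion) {
        return libvirtVersion >= MIN_LIBVIRT_FOR_IO_URING && qemuVersion >= MIN_QEMU_FOR_IO_URING;
    }
}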
diff --git a/api/src/main/java/org/apache/cloudstack/api/BaseBackupListCmd.java b/api/src/main/java/org/apache/cloudstack/api/BaseBackupListCmd.java index 0aa8366bcd5c..2a64a1fb6fd8 100644 --- a/api/src/main/java/org/apache/cloudstack/api/BaseBackupListCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/BaseBackupListCmd.java @@ -25,7 +25,7 @@ import org.apache.cloudstack.backup.BackupOffering; import org.apache.cloudstack.context.CallContext; -public abstract class BaseBackupListCmd extends BaseListCmd { +public abstract class BaseBackupListCmd extends BaseListAccountResourcesCmd { protected void setupResponseBackupOfferingsList(final List offerings, final Integer count) { final ListResponse response = new ListResponse<>(); diff --git a/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java b/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java index 8f47d51b19d4..a4de301cc991 100644 --- a/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/BaseCmd.java @@ -382,7 +382,7 @@ public List getParamFields() { if (roleIsAllowed) { validFields.add(field); } else { - logger.debug("Ignoring parameter " + parameterAnnotation.name() + " as the caller is not authorized to pass it in"); + logger.debug("Ignoring parameter {} as the caller is not authorized to pass it in", parameterAnnotation.name()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java index f0a82ccb9a63..6da9db57ee30 100644 --- a/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/BaseUpdateTemplateOrIsoCmd.java @@ -84,7 +84,7 @@ public abstract class BaseUpdateTemplateOrIsoCmd extends BaseCmd { private Boolean cleanupDetails; @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, - description = "the CPU arch of the template/ISO. Valid options are: x86_64, aarch64", + description = "the CPU arch of the template/ISO. 
Valid options are: x86_64, aarch64, s390x", since = "4.20") private String arch; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/autoscale/DeleteCounterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/autoscale/DeleteCounterCmd.java index fb0c9606c512..769f6fd8b142 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/autoscale/DeleteCounterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/autoscale/DeleteCounterCmd.java @@ -61,7 +61,7 @@ public void execute() { SuccessResponse response = new SuccessResponse(getCommandName()); this.setResponseObject(response); } else { - logger.warn("Failed to delete counter with Id: " + getId()); + logger.warn("Failed to delete counter with Id: {}", getId()); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete counter."); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ImportBackupOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ImportBackupOfferingCmd.java index 2e73698e7aa1..5e702585a2c3 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ImportBackupOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/ImportBackupOfferingCmd.java @@ -27,6 +27,7 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.BackupOfferingResponse; +import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.api.response.ZoneResponse; import org.apache.cloudstack.backup.BackupManager; import org.apache.cloudstack.backup.BackupOffering; @@ -40,6 +41,11 @@ import com.cloud.exception.ResourceAllocationException; import com.cloud.exception.ResourceUnavailableException; import com.cloud.utils.exception.CloudRuntimeException; +import org.apache.commons.collections.CollectionUtils; + +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; @APICommand(name = "importBackupOffering", description = "Imports a backup offering using a backup provider", @@ -76,6 +82,13 @@ public class ImportBackupOfferingCmd extends BaseAsyncCmd { description = "Whether users are allowed to create adhoc backups and backup schedules", required = true) private Boolean userDrivenBackups; + @Parameter(name = ApiConstants.DOMAIN_ID, + type = CommandType.LIST, + collectionType = CommandType.UUID, + entityType = DomainResponse.class, + description = "the ID of the containing domain(s), null for public offerings") + private List domainIds; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -100,6 +113,15 @@ public Boolean getUserDrivenBackups() { return userDrivenBackups == null ? 
false : userDrivenBackups; } + public List getDomainIds() { + if (CollectionUtils.isNotEmpty(domainIds)) { + Set set = new LinkedHashSet<>(domainIds); + domainIds.clear(); + domainIds.addAll(set); + } + return domainIds; + } + ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/UpdateBackupOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/UpdateBackupOfferingCmd.java index 9de06715ee74..2f0dd6acd0e1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/UpdateBackupOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/backup/UpdateBackupOfferingCmd.java @@ -25,19 +25,24 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.offering.DomainAndZoneIdResolver; import org.apache.cloudstack.api.response.BackupOfferingResponse; import org.apache.cloudstack.backup.BackupManager; import org.apache.cloudstack.backup.BackupOffering; import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import com.cloud.exception.InvalidParameterValueException; import com.cloud.user.Account; import com.cloud.utils.exception.CloudRuntimeException; +import java.util.List; +import java.util.function.LongFunction; + @APICommand(name = "updateBackupOffering", description = "Updates a backup offering.", responseObject = BackupOfferingResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false, since = "4.16.0") -public class UpdateBackupOfferingCmd extends BaseCmd { +public class UpdateBackupOfferingCmd extends BaseCmd implements DomainAndZoneIdResolver { @Inject private BackupManager backupManager; @@ -57,6 +62,13 @@ public class UpdateBackupOfferingCmd extends BaseCmd { @Parameter(name = ApiConstants.ALLOW_USER_DRIVEN_BACKUPS, type = CommandType.BOOLEAN, description = "Whether to allow user driven backups or not") private Boolean allowUserDrivenBackups; + @Parameter(name = ApiConstants.DOMAIN_ID, + type = CommandType.STRING, + description = "the ID of the containing domain(s) as comma separated string, public for public offerings", + since = "4.23.0", + length = 4096) + private String domainIds; + ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// ///////////////////////////////////////////////////// @@ -82,7 +94,7 @@ public Boolean getAllowUserDrivenBackups() { @Override public void execute() { try { - if (StringUtils.isAllEmpty(getName(), getDescription()) && getAllowUserDrivenBackups() == null) { + if (StringUtils.isAllEmpty(getName(), getDescription()) && getAllowUserDrivenBackups() == null && CollectionUtils.isEmpty(getDomainIds())) { throw new InvalidParameterValueException(String.format("Can't update Backup Offering [id: %s] because there are no parameters to be updated, at least one of the", "following should be informed: name, description or allowUserDrivenBackups.", id)); } @@ -98,11 +110,23 @@ public void execute() { this.setResponseObject(response); } catch (CloudRuntimeException e) { ApiErrorCode paramError = e instanceof InvalidParameterValueException ? 
ApiErrorCode.PARAM_ERROR : ApiErrorCode.INTERNAL_ERROR; - logger.error(String.format("Failed to update Backup Offering [id: %s] due to: [%s].", id, e.getMessage()), e); + logger.error("Failed to update Backup Offering [id: {}] due to: [{}].", id, e.getMessage(), e); throw new ServerApiException(paramError, e.getMessage()); } } + public List getDomainIds() { + // backupManager may be null in unit tests where the command is spied without injection. + // Avoid creating a method reference to a null receiver which causes NPE. When backupManager + // is null, pass null as the defaultDomainsProvider so resolveDomainIds will simply return + // an empty list or parse the explicit domainIds string. + LongFunction> defaultDomainsProvider = null; + if (backupManager != null) { + defaultDomainsProvider = backupManager::getBackupOfferingDomains; + } + return resolveDomainIds(domainIds, id, defaultDomainsProvider, "backup offering"); + } + @Override public long getEntityOwnerId() { return Account.ACCOUNT_ID_SYSTEM; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java index 5d1c1f8a6fd5..68de3836321c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/AddClusterCmd.java @@ -65,11 +65,11 @@ public class AddClusterCmd extends BaseCmd { @Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, required = true, - description = "Hypervisor type of the cluster: XenServer,KVM,VMware,Hyperv,BareMetal,Simulator,Ovm3,External") + description = "Hypervisor type of the cluster: XenServer,KVM,VMware,BareMetal,Simulator,External") private String hypervisor; @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, - description = "The CPU arch of the cluster. Valid options are: x86_64, aarch64", + description = "The CPU arch of the cluster. Valid options are: x86_64, aarch64, s390x", since = "4.20") private String arch; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java index 2e7cb217d27a..77d0557af05b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/cluster/UpdateClusterCmd.java @@ -58,7 +58,7 @@ public class UpdateClusterCmd extends BaseCmd { private String managedState; @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, - description = "the CPU arch of the cluster. Valid options are: x86_64, aarch64", + description = "the CPU arch of the cluster. 
Valid options are: x86_64, aarch64, s390x", since = "4.20") private String arch; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateCfgCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateCfgCmd.java index 2fad8d71c68b..97dee8f638af 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateCfgCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/config/UpdateCfgCmd.java @@ -150,7 +150,7 @@ public void execute() { ConfigurationResponse response = _responseGenerator.createConfigurationResponse(cfg); response.setResponseName(getCommandName()); response = setResponseScopes(response); - response = setResponseValue(response, cfg); + setResponseValue(response, cfg); this.setResponseObject(response); } else { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to update config"); @@ -161,15 +161,13 @@ public void execute() { * Sets the configuration value in the response. If the configuration is in the `Hidden` or `Secure` categories, the value is encrypted before being set in the response. * @param response to be set with the configuration `cfg` value * @param cfg to be used in setting the response value - * @return the response with the configuration's value */ - public ConfigurationResponse setResponseValue(ConfigurationResponse response, Configuration cfg) { + public void setResponseValue(ConfigurationResponse response, Configuration cfg) { + String value = cfg.getValue(); if (cfg.isEncrypted()) { - response.setValue(DBEncryptionUtil.encrypt(getValue())); - } else { - response.setValue(getValue()); + value = DBEncryptionUtil.encrypt(value); } - return response; + response.setValue(value); } /** diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/UploadTemplateDirectDownloadCertificateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/UploadTemplateDirectDownloadCertificateCmd.java index 780198dded59..ad440376a913 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/UploadTemplateDirectDownloadCertificateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/direct/download/UploadTemplateDirectDownloadCertificateCmd.java @@ -95,7 +95,7 @@ public void execute() { } try { - logger.debug("Uploading certificate " + name + " to agents for Direct Download"); + logger.debug("Uploading certificate {} to agents for Direct Download", name); Pair> uploadStatus = directDownloadManager.uploadCertificateToHosts(certificate, name, hypervisor, zoneId, hostId); DirectDownloadCertificate certificate = uploadStatus.first(); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java index 5c5a92c45cac..aa1978042265 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/domain/ListDomainsCmd.java @@ -23,7 +23,7 @@ import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.ApiConstants.DomainDetails; -import org.apache.cloudstack.api.BaseListCmd; +import org.apache.cloudstack.api.BaseListTaggedResourcesCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ResponseObject.ResponseView; import org.apache.cloudstack.api.command.user.UserCmd; @@ -39,7 +39,7 @@ 
@APICommand(name = "listDomains", description = "Lists domains and provides detailed information for listed domains", responseObject = DomainResponse.class, responseView = ResponseView.Restricted, entityType = {Domain.class}, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) -public class ListDomainsCmd extends BaseListCmd implements UserCmd { +public class ListDomainsCmd extends BaseListTaggedResourcesCmd implements UserCmd { private static final String s_name = "listdomainsresponse"; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java index e202dfad77ba..d0b9049a3d46 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/ListHostsCmd.java @@ -104,7 +104,7 @@ public class ListHostsCmd extends BaseListCmd { @Parameter(name = ApiConstants.HA_HOST, type = CommandType.BOOLEAN, description = "If true, list only hosts dedicated to HA") private Boolean haHost; - @Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, description = "Hypervisor type of host: XenServer,KVM,VMware,Hyperv,BareMetal,Simulator") + @Parameter(name = ApiConstants.HYPERVISOR, type = CommandType.STRING, description = "Hypervisor type of host: XenServer,KVM,VMware,BareMetal,Simulator") private String hypervisor; @Parameter(name = ApiConstants.MANAGEMENT_SERVER_ID, type = CommandType.UUID, entityType = ManagementServerResponse.class, description = "the id of the management server", since="4.21.0") diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java index 82699b40cdda..c085abd42c76 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/host/UpdateHostCmd.java @@ -147,7 +147,7 @@ public void execute() { this.setResponseObject(hostResponse); } catch (Exception e) { Host host = _entityMgr.findById(Host.class, getId()); - logger.debug("Failed to update host: {} with id {}", host, getId(), e); + logger.error("Failed to update {}", host, e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, String.format("Failed to update host: %s with id %d, %s", host, getId(), e.getMessage())); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/iso/ListIsoPermissionsCmdByAdmin.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/iso/ListIsoPermissionsCmdByAdmin.java index c4b9db3b7c38..b831a99cb0af 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/iso/ListIsoPermissionsCmdByAdmin.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/iso/ListIsoPermissionsCmdByAdmin.java @@ -1,4 +1,4 @@ -// Licensedname = "listIsoPermissions", to the Apache Software Foundation (ASF) under one +// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. 
The ASF licenses this file diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteManagementNetworkIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteManagementNetworkIpRangeCmd.java index 41cf5e518b34..5b50c90b3964 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteManagementNetworkIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteManagementNetworkIpRangeCmd.java @@ -116,7 +116,7 @@ public void execute() { logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (Exception e) { - logger.warn("Failed to delete management ip range from " + getStartIp() + " to " + getEndIp() + " of Pod: " + getPodId(), e); + logger.warn("Failed to delete management ip range from {} to {} of Pod: {}", getStartIp(), getEndIp(), getPodId(), e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteStorageNetworkIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteStorageNetworkIpRangeCmd.java index d12135cc60c4..4ffe58332ebf 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteStorageNetworkIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/DeleteStorageNetworkIpRangeCmd.java @@ -75,7 +75,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE SuccessResponse response = new SuccessResponse(getCommandName()); this.setResponseObject(response); } catch (Exception e) { - logger.warn("Failed to delete storage network ip range " + getId(), e); + logger.warn("Failed to delete storage network ip range {}", getId(), e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListStorageNetworkIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListStorageNetworkIpRangeCmd.java index c269be933735..3e32bed3d500 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListStorageNetworkIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/ListStorageNetworkIpRangeCmd.java @@ -97,7 +97,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE response.setResponseName(getCommandName()); this.setResponseObject(response); } catch (Exception e) { - logger.warn("Failed to list storage Network IP range for rangeId=" + getRangeId() + " podId=" + getPodId() + " zoneId=" + getZoneId()); + logger.warn("Failed to list storage Network IP range for rangeId={} podId={} zoneId={}", getRangeId(), getPodId(), getZoneId()); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateNetworkOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateNetworkOfferingCmd.java index 8910966ba2e3..df9f6ad0664d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateNetworkOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdateNetworkOfferingCmd.java @@ -16,7 +16,6 @@ // under the License. 
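// Note on the recurring logging change in the hunks above: string concatenation such as
// logger.warn("Failed for " + id, e) is replaced by parameterized messages. With the Log4j2 API the
// message is only formatted when the level is enabled, and a trailing Throwable argument is still
// rendered with its stack trace. A standalone sketch of the pattern, with illustrative values:
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class ParameterizedLoggingExample {
    private static final Logger logger = LogManager.getLogger(ParameterizedLoggingExample.class);

    public static void main(String[] args) {
        Long rangeId = 42L;                                       // illustrative value
        Exception failure = new RuntimeException("simulated failure");
        // rangeId fills the {} placeholder; failure, being the extra trailing Throwable, is logged with its stack trace.
        logger.warn("Failed to delete storage network ip range {}", rangeId, failure);
    }
}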
package org.apache.cloudstack.api.command.admin.network; -import java.util.ArrayList; import java.util.List; import org.apache.cloudstack.api.APICommand; @@ -26,18 +25,16 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.offering.DomainAndZoneIdResolver; import org.apache.cloudstack.api.response.NetworkOfferingResponse; -import org.apache.commons.lang3.StringUtils; -import com.cloud.dc.DataCenter; -import com.cloud.domain.Domain; -import com.cloud.exception.InvalidParameterValueException; + import com.cloud.offering.NetworkOffering; import com.cloud.user.Account; @APICommand(name = "updateNetworkOffering", description = "Updates a network offering.", responseObject = NetworkOfferingResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) -public class UpdateNetworkOfferingCmd extends BaseCmd { +public class UpdateNetworkOfferingCmd extends BaseCmd implements DomainAndZoneIdResolver { ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -130,63 +127,11 @@ public String getTags() { } public List getDomainIds() { - List validDomainIds = new ArrayList<>(); - if (StringUtils.isNotEmpty(domainIds)) { - if (domainIds.contains(",")) { - String[] domains = domainIds.split(","); - for (String domain : domains) { - Domain validDomain = _entityMgr.findByUuid(Domain.class, domain.trim()); - if (validDomain != null) { - validDomainIds.add(validDomain.getId()); - } else { - throw new InvalidParameterValueException("Failed to create network offering because invalid domain has been specified."); - } - } - } else { - domainIds = domainIds.trim(); - if (!domainIds.matches("public")) { - Domain validDomain = _entityMgr.findByUuid(Domain.class, domainIds.trim()); - if (validDomain != null) { - validDomainIds.add(validDomain.getId()); - } else { - throw new InvalidParameterValueException("Failed to create network offering because invalid domain has been specified."); - } - } - } - } else { - validDomainIds.addAll(_configService.getNetworkOfferingDomains(id)); - } - return validDomainIds; + return resolveDomainIds(domainIds, id, _configService::getNetworkOfferingDomains, "network offering"); } public List getZoneIds() { - List validZoneIds = new ArrayList<>(); - if (StringUtils.isNotEmpty(zoneIds)) { - if (zoneIds.contains(",")) { - String[] zones = zoneIds.split(","); - for (String zone : zones) { - DataCenter validZone = _entityMgr.findByUuid(DataCenter.class, zone.trim()); - if (validZone != null) { - validZoneIds.add(validZone.getId()); - } else { - throw new InvalidParameterValueException("Failed to create network offering because invalid zone has been specified."); - } - } - } else { - zoneIds = zoneIds.trim(); - if (!zoneIds.matches("all")) { - DataCenter validZone = _entityMgr.findByUuid(DataCenter.class, zoneIds.trim()); - if (validZone != null) { - validZoneIds.add(validZone.getId()); - } else { - throw new InvalidParameterValueException("Failed to create network offering because invalid zone has been specified."); - } - } - } - } else { - validZoneIds.addAll(_configService.getNetworkOfferingZones(id)); - } - return validZoneIds; + return resolveZoneIds(zoneIds, id, _configService::getNetworkOfferingZones, "network offering"); } ///////////////////////////////////////////////////// diff --git 
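// The DomainAndZoneIdResolver interface that this command now implements is introduced elsewhere in the
// change and is not visible in these hunks. Below is a rough sketch, inferred only from the call sites
// (a raw comma-separated id string or the keywords "public"/"all", the offering id, a provider of the
// currently assigned ids, and a label), of how a shared default method could consolidate the duplicated
// parsing that is being removed. It is illustrative, not the actual implementation added by the change.
import java.util.ArrayList;
import java.util.List;
import java.util.function.LongFunction;

public interface DomainAndZoneIdResolverSketch {
    // Assumed lookup hook (e.g. backed by the command's entity manager); not part of the shown diff.
    Long lookupIdByUuid(String uuid);

    default List<Long> resolveIds(String rawIds, long offeringId, String allKeyword,
            LongFunction<List<Long>> currentIdsProvider, String offeringLabel) {
        List<Long> validIds = new ArrayList<>();
        if (rawIds == null || rawIds.trim().isEmpty()) {
            if (currentIdsProvider != null) {
                validIds.addAll(currentIdsProvider.apply(offeringId)); // keep the offering's existing ids
            }
            return validIds;
        }
        for (String token : rawIds.split(",")) {
            token = token.trim();
            if (token.equalsIgnoreCase(allKeyword)) {
                continue; // "public" (domains) or "all" (zones) means no restriction
            }
            Long id = lookupIdByUuid(token);
            if (id == null) {
                throw new IllegalArgumentException(String.format("Failed to update %s because an invalid id was specified: %s", offeringLabel, token));
            }
            validIds.add(id);
        }
        return validIds;
    }
}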
a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdatePodManagementNetworkIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdatePodManagementNetworkIpRangeCmd.java index 6f90a070f0d1..394d42a65a3a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdatePodManagementNetworkIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/network/UpdatePodManagementNetworkIpRangeCmd.java @@ -139,7 +139,7 @@ public void execute() { logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (Exception e) { - logger.warn("Failed to update pod management IP range " + getNewStartIP() + "-" + getNewEndIP() + " of Pod: " + getPodId(), e); + logger.warn("Failed to update pod management IP range {}-{} of Pod: {}", getNewStartIP(), getNewEndIP(), getPodId(), e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java index c93b5d41a1c5..4d48327eeb77 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateDiskOfferingCmd.java @@ -16,7 +16,6 @@ // under the License. package org.apache.cloudstack.api.command.admin.offering; -import java.util.ArrayList; import java.util.List; import com.cloud.offering.DiskOffering.State; @@ -27,19 +26,18 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.offering.DomainAndZoneIdResolver; import org.apache.cloudstack.api.response.DiskOfferingResponse; import org.apache.commons.lang3.EnumUtils; import org.apache.commons.lang3.StringUtils; -import com.cloud.dc.DataCenter; -import com.cloud.domain.Domain; import com.cloud.exception.InvalidParameterValueException; import com.cloud.offering.DiskOffering; import com.cloud.user.Account; @APICommand(name = "updateDiskOffering", description = "Updates a disk offering.", responseObject = DiskOfferingResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) -public class UpdateDiskOfferingCmd extends BaseCmd { +public class UpdateDiskOfferingCmd extends BaseCmd implements DomainAndZoneIdResolver { ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -152,63 +150,11 @@ public Boolean getDisplayOffering() { } public List getDomainIds() { - List validDomainIds = new ArrayList<>(); - if (StringUtils.isNotEmpty(domainIds)) { - if (domainIds.contains(",")) { - String[] domains = domainIds.split(","); - for (String domain : domains) { - Domain validDomain = _entityMgr.findByUuid(Domain.class, domain.trim()); - if (validDomain != null) { - validDomainIds.add(validDomain.getId()); - } else { - throw new InvalidParameterValueException("Failed to create disk offering because invalid domain has been specified."); - } - } - } else { - domainIds = domainIds.trim(); - if (!domainIds.matches("public")) { - Domain validDomain = _entityMgr.findByUuid(Domain.class, domainIds.trim()); - if (validDomain != null) { - validDomainIds.add(validDomain.getId()); - } else { - throw new InvalidParameterValueException("Failed to create disk offering 
because invalid domain has been specified."); - } - } - } - } else { - validDomainIds.addAll(_configService.getDiskOfferingDomains(id)); - } - return validDomainIds; + return resolveDomainIds(domainIds, id, _configService::getDiskOfferingDomains, "disk offering"); } public List getZoneIds() { - List validZoneIds = new ArrayList<>(); - if (StringUtils.isNotEmpty(zoneIds)) { - if (zoneIds.contains(",")) { - String[] zones = zoneIds.split(","); - for (String zone : zones) { - DataCenter validZone = _entityMgr.findByUuid(DataCenter.class, zone.trim()); - if (validZone != null) { - validZoneIds.add(validZone.getId()); - } else { - throw new InvalidParameterValueException("Failed to create disk offering because invalid zone has been specified."); - } - } - } else { - zoneIds = zoneIds.trim(); - if (!zoneIds.matches("all")) { - DataCenter validZone = _entityMgr.findByUuid(DataCenter.class, zoneIds.trim()); - if (validZone != null) { - validZoneIds.add(validZone.getId()); - } else { - throw new InvalidParameterValueException("Failed to create disk offering because invalid zone has been specified."); - } - } - } - } else { - validZoneIds.addAll(_configService.getDiskOfferingZones(id)); - } - return validZoneIds; + return resolveZoneIds(zoneIds, id, _configService::getDiskOfferingZones, "disk offering"); } public String getTags() { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java index 8f1005844a9d..8e37499c95ed 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/offering/UpdateServiceOfferingCmd.java @@ -16,7 +16,6 @@ // under the License. 
package org.apache.cloudstack.api.command.admin.offering; -import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -28,19 +27,18 @@ import org.apache.cloudstack.api.BaseCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.offering.DomainAndZoneIdResolver; import org.apache.cloudstack.api.response.ServiceOfferingResponse; import org.apache.commons.lang3.EnumUtils; import org.apache.commons.lang3.StringUtils; -import com.cloud.dc.DataCenter; -import com.cloud.domain.Domain; import com.cloud.exception.InvalidParameterValueException; import com.cloud.offering.ServiceOffering; import com.cloud.user.Account; @APICommand(name = "updateServiceOffering", description = "Updates a service offering.", responseObject = ServiceOfferingResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) -public class UpdateServiceOfferingCmd extends BaseCmd { +public class UpdateServiceOfferingCmd extends BaseCmd implements DomainAndZoneIdResolver { ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -131,63 +129,11 @@ public Integer getSortKey() { } public List getDomainIds() { - List validDomainIds = new ArrayList<>(); - if (StringUtils.isNotEmpty(domainIds)) { - if (domainIds.contains(",")) { - String[] domains = domainIds.split(","); - for (String domain : domains) { - Domain validDomain = _entityMgr.findByUuid(Domain.class, domain.trim()); - if (validDomain != null) { - validDomainIds.add(validDomain.getId()); - } else { - throw new InvalidParameterValueException("Failed to create service offering because invalid domain has been specified."); - } - } - } else { - domainIds = domainIds.trim(); - if (!domainIds.matches("public")) { - Domain validDomain = _entityMgr.findByUuid(Domain.class, domainIds.trim()); - if (validDomain != null) { - validDomainIds.add(validDomain.getId()); - } else { - throw new InvalidParameterValueException("Failed to create service offering because invalid domain has been specified."); - } - } - } - } else { - validDomainIds.addAll(_configService.getServiceOfferingDomains(id)); - } - return validDomainIds; + return resolveDomainIds(domainIds, id, _configService::getServiceOfferingDomains, "service offering"); } public List getZoneIds() { - List validZoneIds = new ArrayList<>(); - if (StringUtils.isNotEmpty(zoneIds)) { - if (zoneIds.contains(",")) { - String[] zones = zoneIds.split(","); - for (String zone : zones) { - DataCenter validZone = _entityMgr.findByUuid(DataCenter.class, zone.trim()); - if (validZone != null) { - validZoneIds.add(validZone.getId()); - } else { - throw new InvalidParameterValueException("Failed to create service offering because invalid zone has been specified."); - } - } - } else { - zoneIds = zoneIds.trim(); - if (!zoneIds.matches("all")) { - DataCenter validZone = _entityMgr.findByUuid(DataCenter.class, zoneIds.trim()); - if (validZone != null) { - validZoneIds.add(validZone.getId()); - } else { - throw new InvalidParameterValueException("Failed to create service offering because invalid zone has been specified."); - } - } - } - } else { - validZoneIds.addAll(_configService.getServiceOfferingZones(id)); - } - return validZoneIds; + return resolveZoneIds(zoneIds, id, _configService::getServiceOfferingZones, "service offering"); } public String getStorageTags() { diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/template/ListTemplatePermissionsCmdByAdmin.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/template/ListTemplatePermissionsCmdByAdmin.java index a68958a64b1a..6115ff98f38a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/template/ListTemplatePermissionsCmdByAdmin.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/template/ListTemplatePermissionsCmdByAdmin.java @@ -1,4 +1,4 @@ -// Licensedname = "listTemplatePermissions", to the Apache Software Foundation (ASF) under one +// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/AddTrafficTypeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/AddTrafficTypeCmd.java index 2ba3b321887d..ff8dfdd14f27 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/AddTrafficTypeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/AddTrafficTypeCmd.java @@ -67,11 +67,13 @@ public class AddTrafficTypeCmd extends BaseAsyncCreateCmd { description = "The network name label of the physical device dedicated to this traffic on a VMware host") private String vmwareLabel; + @Deprecated @Parameter(name = ApiConstants.HYPERV_NETWORK_LABEL, type = CommandType.STRING, description = "The network name label of the physical device dedicated to this traffic on a Hyperv host") private String hypervLabel; + @Deprecated @Parameter(name = ApiConstants.OVM3_NETWORK_LABEL, type = CommandType.STRING, description = "The network name of the physical device dedicated to this traffic on an OVM3 host") @@ -108,19 +110,11 @@ public String getVmwareLabel() { return vmwareLabel; } - public String getHypervLabel() { - return hypervLabel; - } - public String getSimulatorLabel() { //simulators will have no labels return null; } - public String getOvm3Label() { - return ovm3Label; - } - public void setVlan(String vlan) { this.vlan = vlan; } @@ -163,7 +157,7 @@ public void execute() { public void create() throws ResourceAllocationException { PhysicalNetworkTrafficType result = _networkService.addTrafficTypeToPhysicalNetwork(getPhysicalNetworkId(), getTrafficType(), getIsolationMethod(), getXenLabel(), getKvmLabel(), getVmwareLabel(), - getSimulatorLabel(), getVlan(), getHypervLabel(), getOvm3Label()); + getSimulatorLabel(), getVlan(), null, null); if (result != null) { setEntityId(result.getId()); setEntityUuid(result.getUuid()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListTrafficTypesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListTrafficTypesCmd.java index 28e2eefe52f9..dc6e0f5dd69d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListTrafficTypesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/ListTrafficTypesCmd.java @@ -26,14 +26,13 @@ import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.ListResponse; import org.apache.cloudstack.api.response.PhysicalNetworkResponse; -import org.apache.cloudstack.api.response.ProviderResponse; import org.apache.cloudstack.api.response.TrafficTypeResponse; import com.cloud.network.PhysicalNetworkTrafficType; import com.cloud.user.Account; import 
com.cloud.utils.Pair; -@APICommand(name = "listTrafficTypes", description = "Lists traffic types of a given physical network.", responseObject = ProviderResponse.class, since = "3.0.0", +@APICommand(name = "listTrafficTypes", description = "Lists traffic types of a given physical network.", responseObject = TrafficTypeResponse.class, since = "3.0.0", requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListTrafficTypesCmd extends BaseListCmd { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/UpdateTrafficTypeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/UpdateTrafficTypeCmd.java index 0de4cfb7edda..a87782551490 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/UpdateTrafficTypeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/usage/UpdateTrafficTypeCmd.java @@ -57,11 +57,13 @@ public class UpdateTrafficTypeCmd extends BaseAsyncCmd { description = "The network name label of the physical device dedicated to this traffic on a VMware host") private String vmwareLabel; + @Deprecated @Parameter(name = ApiConstants.HYPERV_NETWORK_LABEL, type = CommandType.STRING, description = "The network name label of the physical device dedicated to this traffic on a Hyperv host") private String hypervLabel; + @Deprecated @Parameter(name = ApiConstants.OVM3_NETWORK_LABEL, type = CommandType.STRING, description = "The network name of the physical device dedicated to this traffic on an OVM3 host") @@ -87,14 +89,6 @@ public String getVmwareLabel() { return vmwareLabel; } - public String getHypervLabel() { - return hypervLabel; - } - - public String getOvm3Label() { - return ovm3Label; - } - ///////////////////////////////////////////////////// /////////////// API Implementation/////////////////// ///////////////////////////////////////////////////// @@ -106,7 +100,7 @@ public long getEntityOwnerId() { @Override public void execute() { - PhysicalNetworkTrafficType result = _networkService.updatePhysicalNetworkTrafficType(getId(), getXenLabel(), getKvmLabel(), getVmwareLabel(), getHypervLabel(), getOvm3Label()); + PhysicalNetworkTrafficType result = _networkService.updatePhysicalNetworkTrafficType(getId(), getXenLabel(), getKvmLabel(), getVmwareLabel(), null, null); if (result != null) { TrafficTypeResponse response = _responseGenerator.createTrafficTypeResponse(result); response.setResponseName(getCommandName()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/DisableUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/DisableUserCmd.java index 974c1c7bebed..6ce669d8523d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/user/DisableUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/user/DisableUserCmd.java @@ -78,12 +78,12 @@ public long getEntityOwnerId() { @Override public String getEventDescription() { - return "disabling user: " + getId(); + return "disabling user: " + this._uuidMgr.getUuid(User.class, getId()); } @Override public void execute() { - CallContext.current().setEventDetails("UserId: " + getId()); + CallContext.current().setEventDetails("User ID: " + this._uuidMgr.getUuid(User.class, getId())); UserAccount user = _regionService.disableUser(this); if (user != null) { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/CreateVlanIpRangeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/CreateVlanIpRangeCmd.java index 
876474e24b9d..67278991383c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/CreateVlanIpRangeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vlan/CreateVlanIpRangeCmd.java @@ -238,7 +238,7 @@ public void execute() throws ResourceUnavailableException, ResourceAllocationExc logger.warn("Exception: ", ex); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, ex.getMessage()); } catch (InsufficientCapacityException ex) { - logger.info(ex); + logger.error(ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java index d60c627163b8..ad7e8a48b252 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/ImportUnmanagedInstanceCmd.java @@ -201,9 +201,7 @@ public Map getNicNetworkList() { for (Map entry : (Collection>)nicNetworkList.values()) { String nic = entry.get(VmDetailConstants.NIC); String networkUuid = entry.get(VmDetailConstants.NETWORK); - if (logger.isDebugEnabled()) { - logger.debug(String.format("NIC, '%s', goes on net, '%s'", nic, networkUuid)); - } + logger.debug("Checking if NIC '{}' can be mapped on network '{}'", nic, networkUuid); if (StringUtils.isAnyEmpty(nic, networkUuid) || _entityMgr.findByUuid(Network.class, networkUuid) == null) { throw new InvalidParameterValueException(String.format("Network ID: %s for NIC ID: %s is invalid", networkUuid, nic)); } @@ -219,9 +217,7 @@ public Map getNicIpAddressList() { for (Map entry : (Collection>)nicIpAddressList.values()) { String nic = entry.get(VmDetailConstants.NIC); String ipAddress = StringUtils.defaultIfEmpty(entry.get(VmDetailConstants.IP4_ADDRESS), null); - if (logger.isDebugEnabled()) { - logger.debug(String.format("NIC, '%s', gets IP, '%s'", nic, ipAddress)); - } + logger.debug("Checking if IP address '{}' can be mapped to NIC '{}'", ipAddress, nic); if (StringUtils.isEmpty(nic)) { throw new InvalidParameterValueException(String.format("NIC ID: '%s' is invalid for IP address mapping", nic)); } @@ -244,9 +240,7 @@ public Map getDataDiskToDiskOfferingList() { for (Map entry : (Collection>)dataDiskToDiskOfferingList.values()) { String disk = entry.get(VmDetailConstants.DISK); String offeringUuid = entry.get(VmDetailConstants.DISK_OFFERING); - if (logger.isTraceEnabled()) { - logger.trace(String.format("disk, '%s', gets offering, '%s'", disk, offeringUuid)); - } + logger.trace("Checking if offering '{}' can be used on disk '{}'", offeringUuid, disk); if (StringUtils.isAnyEmpty(disk, offeringUuid) || _entityMgr.findByUuid(DiskOffering.class, offeringUuid) == null) { throw new InvalidParameterValueException(String.format("Disk offering ID: %s for disk ID: %s is invalid", offeringUuid, disk)); } @@ -298,7 +292,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { Account account = CallContext.current().getCallingAccount(); if (account != null) { diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java index c92cda1a358c..0142f6fc81ab 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vm/MigrateVirtualMachineWithVolumeCmd.java @@ -155,7 +155,7 @@ private Host getDestinationHost() { Host destinationHost = _resourceService.getHost(getHostId()); // OfflineVmwareMigration: destination host would have to not be a required parameter for stopped VMs if (destinationHost == null) { - logger.error(String.format("Unable to find the host with ID [%s].", getHostId())); + logger.error("Unable to find the host with ID [{}].", getHostId()); throw new InvalidParameterValueException("Unable to find the specified host to migrate the Instance."); } return destinationHost; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/volume/ImportVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/volume/ImportVolumeCmd.java index 57c3ee586d35..50f4b9c1fbe5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/volume/ImportVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/volume/ImportVolumeCmd.java @@ -156,7 +156,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java index 608794842ed4..6b425bc10d21 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/CreateVPCOfferingCmd.java @@ -224,10 +224,8 @@ public Map> getServiceProviders() { Iterator> iter = servicesCollection.iterator(); while (iter.hasNext()) { Map obj = iter.next(); - if (logger.isTraceEnabled()) { - logger.trace("service provider entry specified: " + obj); - } - HashMap services = (HashMap) obj; + logger.trace("Service provider entry specified: {}", obj); + HashMap services = (HashMap)obj; String service = services.get("service"); String provider = services.get("provider"); List providerList = null; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCOfferingCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCOfferingCmd.java index 44bc88c8daf5..d4565cbada26 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCOfferingCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/admin/vpc/UpdateVPCOfferingCmd.java @@ -16,7 +16,6 @@ // under the License. 
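Several of the hunks above replace level-guarded String.format(...) logging with parameterized messages. As a point of reference, here is a minimal, self-contained sketch of the two styles; the class name and values are made up, while the logging calls themselves are the standard Log4j 2 API already imported elsewhere in this patch.

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

public class NicMappingLogExample {
    private static final Logger LOGGER = LogManager.getLogger(NicMappingLogExample.class);

    public static void main(String[] args) {
        String nic = "nic-1";                    // hypothetical NIC identifier
        String networkUuid = "net-uuid-example"; // hypothetical network UUID

        // Old style: eager String.format plus an explicit level guard.
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(String.format("NIC, '%s', goes on net, '%s'", nic, networkUuid));
        }

        // New style: parameterized message; the message is only formatted
        // when DEBUG is enabled, so the explicit guard becomes unnecessary.
        LOGGER.debug("Checking if NIC '{}' can be mapped on network '{}'", nic, networkUuid);
    }
}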
package org.apache.cloudstack.api.command.admin.vpc; -import java.util.ArrayList; import java.util.List; import org.apache.cloudstack.api.APICommand; @@ -26,19 +25,16 @@ import org.apache.cloudstack.api.BaseAsyncCmd; import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.ServerApiException; +import org.apache.cloudstack.api.command.offering.DomainAndZoneIdResolver; import org.apache.cloudstack.api.response.VpcOfferingResponse; -import org.apache.commons.lang3.StringUtils; -import com.cloud.dc.DataCenter; -import com.cloud.domain.Domain; import com.cloud.event.EventTypes; -import com.cloud.exception.InvalidParameterValueException; import com.cloud.network.vpc.VpcOffering; import com.cloud.user.Account; @APICommand(name = "updateVPCOffering", description = "Updates VPC offering", responseObject = VpcOfferingResponse.class, requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) -public class UpdateVPCOfferingCmd extends BaseAsyncCmd { +public class UpdateVPCOfferingCmd extends BaseAsyncCmd implements DomainAndZoneIdResolver { ///////////////////////////////////////////////////// //////////////// API parameters ///////////////////// @@ -93,63 +89,11 @@ public String getState() { } public List getDomainIds() { - List validDomainIds = new ArrayList<>(); - if (StringUtils.isNotEmpty(domainIds)) { - if (domainIds.contains(",")) { - String[] domains = domainIds.split(","); - for (String domain : domains) { - Domain validDomain = _entityMgr.findByUuid(Domain.class, domain.trim()); - if (validDomain != null) { - validDomainIds.add(validDomain.getId()); - } else { - throw new InvalidParameterValueException("Failed to create VPC offering because invalid domain has been specified."); - } - } - } else { - domainIds = domainIds.trim(); - if (!domainIds.matches("public")) { - Domain validDomain = _entityMgr.findByUuid(Domain.class, domainIds.trim()); - if (validDomain != null) { - validDomainIds.add(validDomain.getId()); - } else { - throw new InvalidParameterValueException("Failed to create VPC offering because invalid domain has been specified."); - } - } - } - } else { - validDomainIds.addAll(_vpcProvSvc.getVpcOfferingDomains(id)); - } - return validDomainIds; + return resolveDomainIds(domainIds, id, _vpcProvSvc::getVpcOfferingDomains, "VPC offering"); } public List getZoneIds() { - List validZoneIds = new ArrayList<>(); - if (StringUtils.isNotEmpty(zoneIds)) { - if (zoneIds.contains(",")) { - String[] zones = zoneIds.split(","); - for (String zone : zones) { - DataCenter validZone = _entityMgr.findByUuid(DataCenter.class, zone.trim()); - if (validZone != null) { - validZoneIds.add(validZone.getId()); - } else { - throw new InvalidParameterValueException("Failed to create VPC offering because invalid zone has been specified."); - } - } - } else { - zoneIds = zoneIds.trim(); - if (!zoneIds.matches("all")) { - DataCenter validZone = _entityMgr.findByUuid(DataCenter.class, zoneIds.trim()); - if (validZone != null) { - validZoneIds.add(validZone.getId()); - } else { - throw new InvalidParameterValueException("Failed to create VPC offering because invalid zone has been specified."); - } - } - } - } else { - validZoneIds.addAll(_vpcProvSvc.getVpcOfferingZones(id)); - } - return validZoneIds; + return resolveZoneIds(zoneIds, id, _vpcProvSvc::getVpcOfferingZones, "VPC offering"); } public Integer getSortKey() { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/offering/DomainAndZoneIdResolver.java 
b/api/src/main/java/org/apache/cloudstack/api/command/offering/DomainAndZoneIdResolver.java new file mode 100644 index 000000000000..b302c4a9beec --- /dev/null +++ b/api/src/main/java/org/apache/cloudstack/api/command/offering/DomainAndZoneIdResolver.java @@ -0,0 +1,114 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.offering; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.LongFunction; + +import com.cloud.dc.DataCenter; +import com.cloud.domain.Domain; +import com.cloud.exception.InvalidParameterValueException; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.commons.lang3.StringUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; + +/** + * Helper for commands that accept a domainIds or zoneIds string and need to + * resolve them to lists of IDs, falling back to an offering-specific + * default provider. + */ +public interface DomainAndZoneIdResolver { + /** + * Parse the provided domainIds string and return a list of domain IDs. + * If domainIds is empty, the defaultDomainsProvider will be invoked with the + * provided resource id to obtain the current domains. + */ + default List resolveDomainIds(final String domainIds, final Long id, final LongFunction> defaultDomainsProvider, final String resourceTypeName) { + final List validDomainIds = new ArrayList<>(); + final BaseCmd base = (BaseCmd) this; + final Logger logger = LogManager.getLogger(base.getClass()); + + if (StringUtils.isEmpty(domainIds)) { + if (defaultDomainsProvider != null) { + final List defaults = defaultDomainsProvider.apply(id); + if (defaults != null) { + validDomainIds.addAll(defaults); + } + } + return validDomainIds; + } + + final String[] domains = domainIds.split(","); + final String type = (resourceTypeName == null || resourceTypeName.isEmpty()) ? "offering" : resourceTypeName; + for (String domain : domains) { + final String trimmed = domain == null ? "" : domain.trim(); + if (trimmed.isEmpty() || "public".equalsIgnoreCase(trimmed)) { + continue; + } + + final Domain validDomain = base._entityMgr.findByUuid(Domain.class, trimmed); + if (validDomain == null) { + logger.warn("Invalid domain specified for {}", type); + throw new InvalidParameterValueException("Failed to create " + type + " because invalid domain has been specified."); + } + validDomainIds.add(validDomain.getId()); + } + + return validDomainIds; + } + + /** + * Parse the provided zoneIds string and return a list of zone IDs. + * If zoneIds is empty, the defaultZonesProvider will be invoked with the + * provided resource id to obtain the current zones. 
+ */ + default List resolveZoneIds(final String zoneIds, final Long id, final LongFunction> defaultZonesProvider, final String resourceTypeName) { + final List validZoneIds = new ArrayList<>(); + final BaseCmd base = (BaseCmd) this; + final Logger logger = LogManager.getLogger(base.getClass()); + + if (StringUtils.isEmpty(zoneIds)) { + if (defaultZonesProvider != null) { + final List defaults = defaultZonesProvider.apply(id); + if (defaults != null) { + validZoneIds.addAll(defaults); + } + } + return validZoneIds; + } + + final String[] zones = zoneIds.split(","); + final String type = (resourceTypeName == null || resourceTypeName.isEmpty()) ? "offering" : resourceTypeName; + for (String zone : zones) { + final String trimmed = zone == null ? "" : zone.trim(); + if (trimmed.isEmpty() || "all".equalsIgnoreCase(trimmed)) { + continue; + } + + final DataCenter validZone = base._entityMgr.findByUuid(DataCenter.class, trimmed); + if (validZone == null) { + logger.warn("Invalid zone specified for {}: {}", type, trimmed); + throw new InvalidParameterValueException("Failed to create " + type + " because invalid zone has been specified."); + } + validZoneIds.add(validZone.getId()); + } + + return validZoneIds; + } +} diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java index 4ecce59fa454..db9688aa09aa 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateAutoScaleVmProfileCmd.java @@ -232,7 +232,7 @@ public static String getResultObjectName() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateConditionCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateConditionCmd.java index 9d42706cb7d7..2bb101de5593 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateConditionCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/CreateConditionCmd.java @@ -137,7 +137,7 @@ public String getEventType() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScalePolicyCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScalePolicyCmd.java index 98e2e2a771ac..fac7ffe37d30 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScalePolicyCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScalePolicyCmd.java @@ -91,7 +91,7 @@ public void execute() { SuccessResponse response = new SuccessResponse(getCommandName()); setResponseObject(response); } else { - logger.warn("Failed to delete autoscale policy " + getId()); + 
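To make the contract of the new resolver concrete, the following is a behavioral sketch only: it assumes the resolver methods return List&lt;Long&gt; and take a LongFunction&lt;List&lt;Long&gt;&gt; default provider (matching how UpdateVPCOfferingCmd invokes them), and it stands in a plain Map lookup for the BaseCmd entity manager. All names below are hypothetical and not part of the patch.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.function.LongFunction;

public class ResolverContractSketch {

    // Mirrors the resolveDomainIds/resolveZoneIds behavior: split a comma-separated
    // list, skip the "public"/"all" keyword, look up each entry, and fall back to a
    // default provider when the input is empty.
    static List<Long> resolveIds(String csv, long offeringId,
                                 Map<String, Long> uuidToId,            // stands in for _entityMgr.findByUuid
                                 LongFunction<List<Long>> defaultsProvider,
                                 String skipKeyword) {
        List<Long> ids = new ArrayList<>();
        if (csv == null || csv.isEmpty()) {
            ids.addAll(defaultsProvider.apply(offeringId));             // keep the offering's current assignments
            return ids;
        }
        for (String token : csv.split(",")) {
            String trimmed = token.trim();
            if (trimmed.isEmpty() || skipKeyword.equalsIgnoreCase(trimmed)) {
                continue;                                               // "public" / "all" mean no restriction
            }
            Long id = uuidToId.get(trimmed);
            if (id == null) {
                throw new IllegalArgumentException("invalid entry: " + trimmed);
            }
            ids.add(id);
        }
        return ids;
    }

    public static void main(String[] args) {
        Map<String, Long> domains = Map.of("uuid-root", 1L, "uuid-dev", 2L);
        System.out.println(resolveIds("uuid-root, uuid-dev", 42L, domains, x -> List.of(), "public")); // [1, 2]
        System.out.println(resolveIds("", 42L, domains, x -> List.of(1L), "public"));                  // [1] (fallback)
        System.out.println(resolveIds("public", 42L, domains, x -> List.of(), "public"));              // []
    }
}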
logger.warn("Failed to delete autoscale policy {}", getId()); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete AutoScale Policy"); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScaleVmGroupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScaleVmGroupCmd.java index 274aac90262d..c2dd6d5424d9 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScaleVmGroupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScaleVmGroupCmd.java @@ -101,7 +101,7 @@ public void execute() { SuccessResponse response = new SuccessResponse(getCommandName()); setResponseObject(response); } else { - logger.warn("Failed to delete autoscale Instance group " + getId()); + logger.warn("Failed to delete autoscale Instance group {}", getId()); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete autoscale vm group"); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScaleVmProfileCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScaleVmProfileCmd.java index a4fbd709610a..9e2f63deda2e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScaleVmProfileCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteAutoScaleVmProfileCmd.java @@ -90,7 +90,7 @@ public void execute() { SuccessResponse response = new SuccessResponse(getCommandName()); setResponseObject(response); } else { - logger.warn("Failed to delete autoscale Instance profile " + getId()); + logger.warn("Failed to delete autoscale Instance profile {}", getId()); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete autoscale Instance profile"); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteConditionCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteConditionCmd.java index 7b73a76fbbf4..2eeed8b49da1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteConditionCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/autoscale/DeleteConditionCmd.java @@ -64,7 +64,7 @@ public void execute() { SuccessResponse response = new SuccessResponse(getCommandName()); setResponseObject(response); } else { - logger.warn("Failed to delete condition " + getId()); + logger.warn("Failed to delete condition {}", getId()); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to delete condition."); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupCmd.java index 2d0cde65563d..df417ef3a60f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/CreateBackupCmd.java @@ -19,6 +19,7 @@ import javax.inject.Inject; +import com.cloud.vm.VirtualMachine; import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiCommandResourceType; @@ -138,7 +139,8 @@ public String getEventType() { @Override public String getEventDescription() { - return "Creating backup for Instance " + vmId; + String vmUuid = _uuidMgr.getUuid(VirtualMachine.class, getVmId()); + return "Creating backup for 
Instance " + vmUuid; } @Override diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/DeleteBackupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/DeleteBackupCmd.java index 369934a79c49..8c32dac6c3ac 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/DeleteBackupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/DeleteBackupCmd.java @@ -28,6 +28,7 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.BackupResponse; import org.apache.cloudstack.api.response.SuccessResponse; +import org.apache.cloudstack.backup.Backup; import org.apache.cloudstack.backup.BackupManager; import org.apache.cloudstack.context.CallContext; import org.apache.commons.lang3.BooleanUtils; @@ -111,6 +112,7 @@ public String getEventType() { @Override public String getEventDescription() { - return "Deleting backup ID " + backupId; + String backupUuid = _uuidMgr.getUuid(Backup.class, getId()); + return "Deleting backup ID " + backupUuid; } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupsCmd.java index f1f63037f235..fb9c92f433e5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/ListBackupsCmd.java @@ -148,7 +148,7 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE Pair, Integer> result = backupManager.listBackups(this); setupResponseBackupList(result.first(), result.second()); } catch (Exception e) { - logger.debug("Exception while listing backups", e); + logger.error("Exception while listing backups", e); throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/RestoreBackupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/RestoreBackupCmd.java index 5385c0fb0b6a..3d096c0bb388 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/backup/RestoreBackupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/backup/RestoreBackupCmd.java @@ -28,6 +28,7 @@ import org.apache.cloudstack.api.ServerApiException; import org.apache.cloudstack.api.response.SuccessResponse; import org.apache.cloudstack.api.response.BackupResponse; +import org.apache.cloudstack.backup.Backup; import org.apache.cloudstack.backup.BackupManager; import org.apache.cloudstack.context.CallContext; @@ -99,6 +100,7 @@ public String getEventType() { @Override public String getEventDescription() { - return "Restoring Instance from backup: " + backupId; + String backupUuid = _uuidMgr.getUuid(Backup.class, getBackupId()); + return "Restoring Instance from backup: " + backupUuid; } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/CreateBucketCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/CreateBucketCmd.java index 722556b8e2de..f1c149854d92 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/CreateBucketCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/bucket/CreateBucketCmd.java @@ -150,7 +150,7 @@ public ApiCommandResourceType getApiResourceType() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + 
Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java index ed1bd7b063b2..94b6062b6212 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/config/ListCapabilitiesCmd.java @@ -21,7 +21,9 @@ import org.apache.cloudstack.api.APICommand; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.Parameter; import org.apache.cloudstack.api.response.CapabilitiesResponse; +import org.apache.cloudstack.api.response.DomainResponse; import org.apache.cloudstack.config.ApiServiceConfiguration; import com.cloud.user.Account; @@ -30,12 +32,22 @@ requestHasSensitiveInfo = false, responseHasSensitiveInfo = false) public class ListCapabilitiesCmd extends BaseCmd { + @Parameter(name = ApiConstants.DOMAIN_ID, + type = CommandType.UUID, + entityType = DomainResponse.class, + description = "the domain for listing capabilities.", + since = "4.23.0") + private Long domainId; @Override public long getEntityOwnerId() { return Account.ACCOUNT_ID_SYSTEM; } + public Long getDomainId() { + return domainId; + } + @Override public void execute() { Map capabilities = _mgr.listCapabilities(this); @@ -76,6 +88,10 @@ public void execute() { response.setExtensionsPath((String)capabilities.get(ApiConstants.EXTENSIONS_PATH)); response.setDynamicScalingEnabled((Boolean) capabilities.get(ApiConstants.DYNAMIC_SCALING_ENABLED)); response.setAdditionalConfigEnabled((Boolean) capabilities.get(ApiConstants.ADDITONAL_CONFIG_ENABLED)); + if (capabilities.containsKey(ApiConstants.VPN_CUSTOMER_GATEWAY_PARAMETERS)) { + Map vpnCustomerGatewayParameters = (Map) capabilities.get(ApiConstants.VPN_CUSTOMER_GATEWAY_PARAMETERS); + response.setVpnCustomerGatewayParameters(vpnCustomerGatewayParameters); + } response.setObjectName("capability"); response.setResponseName(getCommandName()); this.setResponseObject(response); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateEgressFirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateEgressFirewallRuleCmd.java index 7a96421a385b..f972cbdc6758 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateEgressFirewallRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateEgressFirewallRuleCmd.java @@ -255,11 +255,8 @@ public void create() { } } catch (NetworkRuleConflictException ex) { String message = "Network rule conflict: "; - if (!logger.isTraceEnabled()) { - logger.info(message + ex.getMessage()); - } else { - logger.trace(message, ex); - } + logger.error("{}{}", message, ex.getMessage()); + logger.trace(message, ex); throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, ex.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java index 569b5fc454df..92be579a1ced 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/firewall/CreateFirewallRuleCmd.java @@ -271,7 +271,7 @@ public void create() { setEntityUuid(result.getUuid()); } } catch (NetworkRuleConflictException ex) { - logger.trace("Network Rule Conflict: ", ex); + logger.error("Network Rule Conflict: ", ex); throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, ex.getMessage(), ex); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/GetUploadParamsForIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/GetUploadParamsForIsoCmd.java index 564ebb20b758..43cdf09a89cf 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/GetUploadParamsForIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/GetUploadParamsForIsoCmd.java @@ -158,7 +158,7 @@ public String getCommandName() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(getAccountName(), getDomainId(), getProjectId(), true); + Long accountId = _accountService.finalizeAccountId(getAccountName(), getDomainId(), getProjectId(), true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsoPermissionsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsoPermissionsCmd.java index 48e4b85d66e2..069f3e4959bb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsoPermissionsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsoPermissionsCmd.java @@ -1,4 +1,4 @@ -// Licensedname = "listIsoPermissions", to the Apache Software Foundation (ASF) under one +// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java index 6820afb93722..562cbc2c623d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/ListIsosCmd.java @@ -87,7 +87,7 @@ public class ListIsosCmd extends BaseListTaggedResourcesCmd implements UserCmd { private Boolean showIcon; @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, - description = "the CPU arch of the ISO. Valid options are: x86_64, aarch64", + description = "the CPU arch of the ISO. Valid options are: x86_64, aarch64, s390x", since = "4.20") private String arch; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java index 2de0f96f2716..1c57e902e221 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/iso/RegisterIsoCmd.java @@ -120,7 +120,7 @@ public class RegisterIsoCmd extends BaseCmd implements UserCmd { private Boolean passwordEnabled; @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, - description = "the CPU arch of the ISO. Valid options are: x86_64, aarch64", + description = "the CPU arch of the ISO. 
Valid options are: x86_64, aarch64, s390x", since = "4.20") private String arch; @@ -254,7 +254,7 @@ public String getCommandName() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRuleInstancesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRuleInstancesCmd.java index 8079e287c2ae..bf4604612b1d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRuleInstancesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/ListLoadBalancerRuleInstancesCmd.java @@ -95,10 +95,10 @@ public String getCommandName() { public void execute() { Pair, List> vmServiceMap = _lbService.listLoadBalancerInstances(this); List result = vmServiceMap.first(); - logger.debug(String.format("A total of [%s] user VMs were obtained when listing the load balancer instances: [%s].", result.size(), result)); + logger.debug("A total of [{}] user VMs were obtained when listing the load balancer instances: [{}].", result.size(), result); List serviceStates = vmServiceMap.second(); - logger.debug(String.format("A total of [%s] service states were obtained when listing the load balancer instances: [%s].", serviceStates.size(), serviceStates)); + logger.debug("A total of [{}] service states were obtained when listing the load balancer instances: [{}].", serviceStates.size(), serviceStates); if (!isListLbVmip()) { ListResponse response = new ListResponse<>(); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLBHealthCheckPolicyCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLBHealthCheckPolicyCmd.java index 80e285aec516..17bd61c502dc 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLBHealthCheckPolicyCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLBHealthCheckPolicyCmd.java @@ -63,7 +63,7 @@ public long getEntityOwnerId() { @Override public String getEventDescription() { - return "Update load balancer health check policy ID= " + id; + return "Update load balancer health check policy ID = " + id; } @Override diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLBStickinessPolicyCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLBStickinessPolicyCmd.java index 0163bb8a44f1..e79c1c561206 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLBStickinessPolicyCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/loadbalancer/UpdateLBStickinessPolicyCmd.java @@ -62,7 +62,7 @@ public long getEntityOwnerId() { @Override public String getEventDescription() { - return "Update load balancer stickiness policy ID= " + id; + return "Update load balancer stickiness policy ID = " + id; } @Override diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/nat/CreateIpForwardingRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/nat/CreateIpForwardingRuleCmd.java index 1aaf5573a9dc..596d5952706a 100644 --- 
a/api/src/main/java/org/apache/cloudstack/api/command/user/nat/CreateIpForwardingRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/nat/CreateIpForwardingRuleCmd.java @@ -148,7 +148,7 @@ public void create() { setEntityId(rule.getId()); setEntityUuid(rule.getUuid()); } catch (NetworkRuleConflictException e) { - logger.info("Unable to create static NAT rule due to ", e); + logger.error("Unable to create static NAT rule due to ", e); throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, e.getMessage()); } } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkACLListCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkACLListCmd.java index 317032280f7a..31d1b49c2e71 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkACLListCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkACLListCmd.java @@ -128,7 +128,7 @@ public long getEntityOwnerId() { } else { account = CallContext.current().getCallingAccount(); if (!Account.Type.ADMIN.equals(account.getType())) { - logger.warn(String.format("Only Root Admin can create global ACLs. Account [%s] cannot create any global ACL.", account)); + logger.error("Only Root Admin can create global ACLs. {} cannot create any global ACL.", account); throw new PermissionDeniedException("Only Root Admin can create global ACLs."); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java index 35fec1d6b3e2..cbf6df081b3b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/CreateNetworkCmd.java @@ -417,7 +417,7 @@ public String getCommandName() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/network/routing/CreateRoutingFirewallRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/network/routing/CreateRoutingFirewallRuleCmd.java index 7146d1ae1d19..ad52916c7a92 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/network/routing/CreateRoutingFirewallRuleCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/network/routing/CreateRoutingFirewallRuleCmd.java @@ -250,7 +250,7 @@ public void execute() throws ResourceUnavailableException { } ruleResponse.setResponseName(getCommandName()); } catch (Exception ex) { - logger.error("Got exception when create Routing firewall rules: " + ex); + logger.error("Got exception when create Routing firewall rules: ", ex); } finally { if (!success || rule == null) { routedIpv4Manager.revokeRoutingFirewallRule(getEntityId()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/CreateGlobalLoadBalancerRuleCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/CreateGlobalLoadBalancerRuleCmd.java index df9b6b318651..20b227b831c1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/CreateGlobalLoadBalancerRuleCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/region/ha/gslb/CreateGlobalLoadBalancerRuleCmd.java @@ -180,7 +180,7 @@ public ApiCommandResourceType getApiResourceType() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, null, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, null, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListResourceLimitsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListResourceLimitsCmd.java index 7bae74c73a46..4bda23617092 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListResourceLimitsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/ListResourceLimitsCmd.java @@ -103,7 +103,7 @@ public String getTag() { @Override public void execute() { List result = - _resourceLimitService.searchForLimits(id, _accountService.finalyzeAccountId(this.getAccountName(), this.getDomainId(), this.getProjectId(), false), this.getDomainId(), + _resourceLimitService.searchForLimits(id, _accountService.finalizeAccountId(this.getAccountName(), this.getDomainId(), this.getProjectId(), false), this.getDomainId(), getResourceTypeEnum(), getTag(), this.getStartIndex(), this.getPageSizeVal()); ListResponse response = new ListResponse(); List limitResponses = new ArrayList(); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceCountCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceCountCmd.java index 123b0e48a74c..d43bb29e9d27 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceCountCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceCountCmd.java @@ -127,7 +127,7 @@ public long getEntityOwnerId() { @Override public void execute() { List result = - _resourceLimitService.recalculateResourceCount(_accountService.finalyzeAccountId(accountName, domainId, projectId, true), getDomainId(), getResourceType(), getTag()); + _resourceLimitService.recalculateResourceCount(_accountService.finalizeAccountId(accountName, domainId, projectId, true), getDomainId(), getResourceType(), getTag()); if ((result != null) && (result.size() > 0)) { ListResponse response = new ListResponse(); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceLimitCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceLimitCmd.java index 3678e885a6ea..f88ef9678e31 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceLimitCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/resource/UpdateResourceLimitCmd.java @@ -100,7 +100,7 @@ public Integer getResourceType() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } @@ -110,7 +110,7 @@ public long getEntityOwnerId() { @Override public void execute() { - ResourceLimit result = _resourceLimitService.updateResourceLimit(_accountService.finalyzeAccountId(accountName, domainId, projectId, true), getDomainId(), 
resourceType, max, getTag()); + ResourceLimit result = _resourceLimitService.updateResourceLimit(_accountService.finalizeAccountId(accountName, domainId, projectId, true), getDomainId(), resourceType, max, getTag()); if (result != null || (result == null && max != null && max.longValue() == -1L)) { ResourceLimitResponse response = _responseGenerator.createResourceLimitResponse(result); response.setResponseName(getCommandName()); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/AuthorizeSecurityGroupEgressCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/AuthorizeSecurityGroupEgressCmd.java index 13b09c7e19ac..7d0004c8e5d5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/AuthorizeSecurityGroupEgressCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/AuthorizeSecurityGroupEgressCmd.java @@ -166,7 +166,7 @@ public static String getResultObjectName() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/AuthorizeSecurityGroupIngressCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/AuthorizeSecurityGroupIngressCmd.java index 07a4df9eb5d9..d7a95d8204e6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/AuthorizeSecurityGroupIngressCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/AuthorizeSecurityGroupIngressCmd.java @@ -166,7 +166,7 @@ public static String getResultObjectName() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/DeleteSecurityGroupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/DeleteSecurityGroupCmd.java index 0636f03c92a3..1882d80c1c1b 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/DeleteSecurityGroupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/securitygroup/DeleteSecurityGroupCmd.java @@ -103,7 +103,7 @@ public Long getId() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CopySnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CopySnapshotCmd.java index ac54ebbd8f8c..519f9876b960 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CopySnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CopySnapshotCmd.java @@ -97,7 +97,11 @@ public class CopySnapshotCmd extends BaseAsyncCmd implements UserCmd { "The snapshot will always be 
made available in the zone in which the volume is present. Currently supported for StorPool only") protected List storagePoolIds; - @Parameter (name = ApiConstants.USE_STORAGE_REPLICATION, type=CommandType.BOOLEAN, required = false, since = "4.21.0", description = "This parameter enables the option the snapshot to be copied to supported primary storage") + @Parameter (name = ApiConstants.USE_STORAGE_REPLICATION, + type=CommandType.BOOLEAN, + since = "4.21.0", + description = "Enables the snapshot to be copied to the supported primary storages when the config 'use.storage.replication' is set to true for the storage or globally. " + + "This is supported only for StorPool storage for now.") protected Boolean useStorageReplication; ///////////////////////////////////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java index 3a49bad8fcb9..f78112d679fe 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotCmd.java @@ -112,7 +112,10 @@ public class CreateSnapshotCmd extends BaseAsyncCreateCmd { since = "4.21.0") protected List storagePoolIds; - @Parameter (name = ApiConstants.USE_STORAGE_REPLICATION, type=CommandType.BOOLEAN, required = false, description = "This parameter enables the option the snapshot to be copied to supported primary storage") + @Parameter (name = ApiConstants.USE_STORAGE_REPLICATION, + type=CommandType.BOOLEAN, + description = "Enables the snapshot to be copied to the supported primary storages when the config 'use.storage.replication' is set to true for the storage or globally. 
" + + "This is supported only for StorPool storage for now.") protected Boolean useStorageReplication; private String syncObjectType = BaseAsyncCmd.snapshotHostSyncObject; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java index c7a592c9c847..0dd275cb4ae2 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotFromVMSnapshotCmd.java @@ -165,7 +165,7 @@ public void create() throws ResourceAllocationException { @Override public void execute() { VMSnapshot vmSnapshot = _vmSnapshotService.getVMSnapshotById(getVMSnapshotId()); - logger.info("CreateSnapshotFromVMSnapshotCmd with Instance Snapshot {} with ID: {} and Snapshot [ID: {}, UUID: {}]", vmSnapshot, getVMSnapshotId(), getEntityId(), getEntityUuid()); + logger.info("CreateSnapshotFromVMSnapshotCmd with {} and Snapshot [ID: {}, UUID: {}]", vmSnapshot, getEntityId(), getEntityUuid()); CallContext.current().setEventDetails("Instance Snapshot Id: " + vmSnapshot.getUuid()); Snapshot snapshot = null; try { diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotPolicyCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotPolicyCmd.java index 24d756befaba..b1e7b2a00040 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotPolicyCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/snapshot/CreateSnapshotPolicyCmd.java @@ -94,7 +94,11 @@ public class CreateSnapshotPolicyCmd extends BaseCmd { since = "4.21.0") protected List storagePoolIds; - @Parameter (name = ApiConstants.USE_STORAGE_REPLICATION, type=CommandType.BOOLEAN, required = false, since = "4.21.0", description = "This parameter enables the option the snapshot to be copied to supported primary storage") + @Parameter (name = ApiConstants.USE_STORAGE_REPLICATION, + type=CommandType.BOOLEAN, + since = "4.21.0", + description = "Enables the snapshot to be copied to the supported primary storages when the config 'use.storage.replication' is set to true for the storage or globally. 
" + + "This is supported only for StorPool storage for now.") protected Boolean useStorageReplication; ///////////////////////////////////////////////////// /////////////////// Accessors /////////////////////// diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/CreateSSHKeyPairCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/CreateSSHKeyPairCmd.java index a55b20593025..1b79c11644fb 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/CreateSSHKeyPairCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/CreateSSHKeyPairCmd.java @@ -77,7 +77,7 @@ public Long getProjectId() { ///////////////////////////////////////////////////// @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/RegisterSSHKeyPairCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/RegisterSSHKeyPairCmd.java index 36c708ea1112..f7af86d08357 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/RegisterSSHKeyPairCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/ssh/RegisterSSHKeyPairCmd.java @@ -85,7 +85,7 @@ public Long getProjectId() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/CreateSharedFSCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/CreateSharedFSCmd.java index ddaa31612a89..595b611b5c0d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/CreateSharedFSCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/storage/sharedfs/CreateSharedFSCmd.java @@ -230,7 +230,7 @@ public Long getApiResourceId() { } @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java index 6e6acf162521..76fadb7853ba 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/CreateTemplateCmd.java @@ -150,7 +150,7 @@ public class CreateTemplateCmd extends BaseAsyncCreateCmd implements UserCmd { private String accountName; @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, - description = "the CPU arch of the template. Valid options are: x86_64, aarch64. Defaults to x86_64", + description = "the CPU arch of the template. Valid options are: x86_64, aarch64, s390x. 
Defaults to x86_64", since = "4.20.2") private String arch; @@ -354,14 +354,12 @@ private void ensureAccessCheck(Account account) { private Long findAccountIdToUse(Account callingAccount) { Long accountIdToUse = null; try { - accountIdToUse = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + accountIdToUse = _accountService.finalizeAccountId(accountName, domainId, projectId, true); } catch (InvalidParameterValueException | PermissionDeniedException ex) { - if (logger.isDebugEnabled()) { - logger.debug(String.format("An exception occurred while finalizing account id with accountName, domainId and projectId" + - "using callingAccountId=%s", callingAccount.getUuid()), ex); - } - logger.warn("Unable to find accountId associated with accountName=" + accountName + " and domainId=" - + domainId + " or projectId=" + projectId + ", using callingAccountId=" + callingAccount.getUuid()); + logger.error("Unable to find accountId associated with accountName={} and domainId={} or projectId={}" + + ", using callingAccountId={}", accountName, domainId, projectId, callingAccount.getUuid()); + logger.debug("An exception occurred while finalizing account id with accountName, domainId and projectId" + + "using callingAccountId={}", callingAccount.getUuid(), ex); } return accountIdToUse != null ? accountIdToUse : callingAccount.getAccountId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/GetUploadParamsForTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/GetUploadParamsForTemplateCmd.java index 12ef433b0056..e6e178baada6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/GetUploadParamsForTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/GetUploadParamsForTemplateCmd.java @@ -57,7 +57,7 @@ public class GetUploadParamsForTemplateCmd extends AbstractGetUploadParamsCmd { private Long osTypeId; @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, - description = "the CPU arch of the template. Valid options are: x86_64, aarch64", + description = "the CPU arch of the template. Valid options are: x86_64, aarch64, s390x", since = "4.20") private String arch; @@ -223,7 +223,7 @@ public String getCommandName() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(getAccountName(), getDomainId(), getProjectId(), true); + Long accountId = _accountService.finalizeAccountId(getAccountName(), getDomainId(), getProjectId(), true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatePermissionsCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatePermissionsCmd.java index 9dabbec37500..7e7efcf87cf4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatePermissionsCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatePermissionsCmd.java @@ -1,4 +1,4 @@ -// Licensedname = "listTemplatePermissions", to the Apache Software Foundation (ASF) under one +// Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. 
The ASF licenses this file diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java index 0cf725690be8..0b52413aaf19 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/ListTemplatesCmd.java @@ -112,7 +112,7 @@ public class ListTemplatesCmd extends BaseListTaggedResourcesCmd implements User private Boolean forCks; @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, - description = "the CPU arch of the template. Valid options are: x86_64, aarch64", + description = "the CPU arch of the template. Valid options are: x86_64, aarch64, s390x", since = "4.20") private String arch; diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java index b5011035cbc2..49992ac66611 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/template/RegisterTemplateCmd.java @@ -180,7 +180,7 @@ public class RegisterTemplateCmd extends BaseCmd implements UserCmd { private String templateType; @Parameter(name = ApiConstants.ARCH, type = CommandType.STRING, - description = "the CPU arch of the template. Valid options are: x86_64, aarch64", + description = "the CPU arch of the template. Valid options are: x86_64, aarch64, s390x", since = "4.20") private String arch; @@ -344,7 +344,7 @@ public ApiCommandResourceType getInstanceType() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterCniConfigurationCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterCniConfigurationCmd.java index eb80da3be05b..3f1de41eab89 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterCniConfigurationCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterCniConfigurationCmd.java @@ -67,7 +67,7 @@ public void execute() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(getAccountName(), getDomainId(), getProjectId(), true); + Long accountId = _accountService.finalizeAccountId(getAccountName(), getDomainId(), getProjectId(), true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmd.java index cbbe76748143..d99f2fd066d6 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmd.java @@ -70,7 +70,7 @@ public String getUserData() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(getAccountName(), getDomainId(), getProjectId(), true); + Long 
accountId = _accountService.finalizeAccountId(getAccountName(), getDomainId(), getProjectId(), true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/BaseDeployVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/BaseDeployVMCmd.java index ecbde47692f2..07c11b21107a 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/BaseDeployVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/BaseDeployVMCmd.java @@ -416,9 +416,7 @@ public Map getVmNetworkMap() { nic = null; } String networkUuid = entry.get(VmDetailConstants.NETWORK); - if (logger.isTraceEnabled()) { - logger.trace(String.format("nic, '%s', goes on net, '%s'", nic, networkUuid)); - } + logger.trace("Checking if NIC '{}' can be mapped on network '{}'", nic, networkUuid); if (nic == null || StringUtils.isEmpty(networkUuid) || _entityMgr.findByUuid(Network.class, networkUuid) == null) { throw new InvalidParameterValueException(String.format("Network ID: %s for NIC ID: %s is invalid", networkUuid, nic)); } @@ -810,7 +808,7 @@ public static String getResultObjectName() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CreateVMFromBackupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CreateVMFromBackupCmd.java index 04e413ed67ab..e17ba9c2d705 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CreateVMFromBackupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/CreateVMFromBackupCmd.java @@ -137,7 +137,7 @@ public void execute () { message.append(", Please check the affinity groups provided, there may not be sufficient capacity to follow them"); } } - logger.info(String.format("%s: %s", message.toString(), ex.getLocalizedMessage())); + logger.info("{}: {}", message.toString(), ex.getLocalizedMessage()); logger.debug(message.toString(), ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, message.toString()); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java index 393a2bb47275..06272cadae26 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/DeployVMCmd.java @@ -112,12 +112,12 @@ public void execute() { message.append(", Please check the affinity groups provided, there may not be sufficient capacity to follow them"); } } - logger.info(String.format("%s: %s", message, ex.getLocalizedMessage())); + logger.info("{}: {}", message.toString(), ex.getLocalizedMessage()); logger.debug(message.toString(), ex); throw new ServerApiException(ApiErrorCode.INSUFFICIENT_CAPACITY_ERROR, message.toString()); } } else { - logger.info("Instance " + getEntityUuid() + " already created, load UserVm from DB"); + logger.info("Instance {} already created, load UserVm from DB", getEntityUuid()); result = _userVmService.finalizeCreateVirtualMachine(getEntityId()); } diff --git 
a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMPasswordCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMPasswordCmd.java index dbf11de83258..5302675fb5f1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMPasswordCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vm/ResetVMPasswordCmd.java @@ -120,9 +120,9 @@ public void execute() throws ResourceUnavailableException, InsufficientCapacityE UserVm vm = _responseGenerator.findUserVmById(getId()); if (StringUtils.isBlank(password)) { password = _mgr.generateRandomPassword(); - logger.debug(String.format("Resetting VM [%s] password to a randomly generated password.", vm.getUuid())); + logger.debug("Resetting VM [{}] password to a randomly generated password.", vm.getUuid()); } else { - logger.debug(String.format("Resetting VM [%s] password to password defined by user.", vm.getUuid())); + logger.debug("Resetting VM [{}] password to password defined by user.", vm.getUuid()); } CallContext.current().setEventDetails("Vm Id: " + getId()); UserVm result = _userVmService.resetVMPassword(this, password); diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/CreateVMGroupCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/CreateVMGroupCmd.java index a142ffa2d492..12f534e1a221 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/CreateVMGroupCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vmgroup/CreateVMGroupCmd.java @@ -82,7 +82,7 @@ public Long getProjectId() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java index 27b592aa8f15..5938bdb810f5 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/CreateVolumeCmd.java @@ -188,7 +188,7 @@ public ApiCommandResourceType getApiResourceType() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/GetUploadParamsForVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/GetUploadParamsForVolumeCmd.java index 0020fe4021e8..1e3b2e460772 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/GetUploadParamsForVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/GetUploadParamsForVolumeCmd.java @@ -72,7 +72,7 @@ public String getCommandName() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(getAccountName(), getDomainId(), getProjectId(), true); + Long accountId = _accountService.finalizeAccountId(getAccountName(), getDomainId(), getProjectId(), true); if (accountId == null) { return 
CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java index 3d23a6317223..81077deff654 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/volume/UploadVolumeCmd.java @@ -159,7 +159,7 @@ public String getCommandName() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateStaticRouteCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateStaticRouteCmd.java index 15a31d48db1c..1605e4830bd1 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateStaticRouteCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateStaticRouteCmd.java @@ -95,7 +95,7 @@ public void create() throws ResourceAllocationException { setEntityId(result.getId()); setEntityUuid(result.getUuid()); } catch (NetworkRuleConflictException ex) { - logger.info("Network rule conflict: " + ex.getMessage()); + logger.info("Network rule conflict: {}", ex.getMessage()); logger.trace("Network rule conflict: ", ex); throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, ex.getMessage()); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java index a91d3bad410f..2adbbd664085 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpc/CreateVPCCmd.java @@ -273,7 +273,7 @@ public String getCommandName() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/AddVpnUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/AddVpnUserCmd.java index b00b932258a4..78cd9a3ac7e4 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/AddVpnUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/AddVpnUserCmd.java @@ -88,7 +88,7 @@ public Long getProjectId() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java index e52e3008202c..c730c2c5fe88 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java +++ 
b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateRemoteAccessVpnCmd.java @@ -146,7 +146,7 @@ public void create() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create remote access VPN"); } } catch (NetworkRuleConflictException e) { - logger.info("Network rule conflict: " + e.getMessage()); + logger.info("Network rule conflict: {}", e.getMessage()); logger.trace("Network Rule Conflict: ", e); throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, e.getMessage()); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnConnectionCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnConnectionCmd.java index e7b3863e7e32..3d6b7918effd 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnConnectionCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnConnectionCmd.java @@ -133,7 +133,7 @@ public void create() { throw new ServerApiException(ApiErrorCode.INTERNAL_ERROR, "Failed to create site to site VPN connection"); } } catch (NetworkRuleConflictException e) { - logger.info("Network rule conflict: " + e.getMessage()); + logger.info("Network rule conflict: {}", e.getMessage()); logger.trace("Network Rule Conflict: ", e); throw new ServerApiException(ApiErrorCode.NETWORK_RULE_CONFLICT_ERROR, e.getMessage()); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnCustomerGatewayCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnCustomerGatewayCmd.java index ef0e2354495f..0da813eb4867 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnCustomerGatewayCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/CreateVpnCustomerGatewayCmd.java @@ -167,7 +167,7 @@ public String getIkeVersion() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { accountId = CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java index aff87105f9cb..a18619c89498 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/RemoveVpnUserCmd.java @@ -82,7 +82,7 @@ public Long getProjecId() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, projectId, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, projectId, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ResetVpnConnectionCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ResetVpnConnectionCmd.java index b6e29e66ff4b..f681c8cce182 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ResetVpnConnectionCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/ResetVpnConnectionCmd.java @@ -74,7 +74,7 @@ public Long getId() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, 
domainId, null, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, null, true); if (accountId == null) { return CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnConnectionCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnConnectionCmd.java index a4a0c927a576..92f6786268ad 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnConnectionCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnConnectionCmd.java @@ -66,7 +66,7 @@ public long getEntityOwnerId() { @Override public String getEventDescription() { - return "Updating site-to-site VPN connection id= " + id; + return "Updating site-to-site VPN connection ID = " + id; } @Override diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnCustomerGatewayCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnCustomerGatewayCmd.java index edd168f08373..56aa8b2cd16d 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnCustomerGatewayCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnCustomerGatewayCmd.java @@ -161,7 +161,7 @@ public String getIkeVersion() { @Override public long getEntityOwnerId() { - Long accountId = _accountService.finalyzeAccountId(accountName, domainId, null, true); + Long accountId = _accountService.finalizeAccountId(accountName, domainId, null, true); if (accountId == null) { accountId = CallContext.current().getCallingAccount().getId(); } diff --git a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnGatewayCmd.java b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnGatewayCmd.java index 29676bdf0cc4..25076991217f 100644 --- a/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnGatewayCmd.java +++ b/api/src/main/java/org/apache/cloudstack/api/command/user/vpn/UpdateVpnGatewayCmd.java @@ -63,7 +63,7 @@ public long getEntityOwnerId() { @Override public String getEventDescription() { - return "Update site-to-site VPN gateway id= " + id; + return "Update site-to-site VPN gateway ID = " + id; } @Override diff --git a/api/src/main/java/org/apache/cloudstack/api/response/BackupOfferingResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/BackupOfferingResponse.java index b3a7d0362198..c4f3ee31dadc 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/BackupOfferingResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/BackupOfferingResponse.java @@ -61,6 +61,16 @@ public class BackupOfferingResponse extends BaseResponse { @Param(description = "Zone name") private String zoneName; + @SerializedName(ApiConstants.DOMAIN_ID) + @Param(description = "the domain ID(s) this backup offering belongs to.", + since = "4.23.0") + private String domainId; + + @SerializedName(ApiConstants.DOMAIN) + @Param(description = "the domain name(s) this backup offering belongs to.", + since = "4.23.0") + private String domain; + @SerializedName(ApiConstants.CROSS_ZONE_INSTANCE_CREATION) @Param(description = "the backups with this offering can be used to create Instances on all Zones", since = "4.22.0") private Boolean crossZoneInstanceCreation; @@ -108,4 +118,13 @@ public void setCrossZoneInstanceCreation(Boolean crossZoneInstanceCreation) { public void setCreated(Date created) { this.created = created; } + + public void 
setDomainId(String domainId) { + this.domainId = domainId; + } + + public void setDomain(String domain) { + this.domain = domain; + } + } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java index 930d1a50de0f..816216962808 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/CapabilitiesResponse.java @@ -16,6 +16,8 @@ // under the License. package org.apache.cloudstack.api.response; +import java.util.Map; + import org.apache.cloudstack.acl.RoleType; import org.apache.cloudstack.api.ApiConstants; import org.apache.cloudstack.api.BaseResponse; @@ -153,6 +155,10 @@ public class CapabilitiesResponse extends BaseResponse { @Param(description = "true if additional configurations or extraconfig can be passed to Instances", since = "4.20.2") private Boolean additionalConfigEnabled; + @SerializedName(ApiConstants.VPN_CUSTOMER_GATEWAY_PARAMETERS) + @Param(description = "Excluded and obsolete VPN customer gateway cryptographic parameters") + private Map vpnCustomerGatewayParameters; + public void setSecurityGroupsEnabled(boolean securityGroupsEnabled) { this.securityGroupsEnabled = securityGroupsEnabled; } @@ -280,4 +286,8 @@ public void setDynamicScalingEnabled(Boolean dynamicScalingEnabled) { public void setAdditionalConfigEnabled(Boolean additionalConfigEnabled) { this.additionalConfigEnabled = additionalConfigEnabled; } + + public void setVpnCustomerGatewayParameters(Map vpnCustomerGatewayParameters) { + this.vpnCustomerGatewayParameters = vpnCustomerGatewayParameters; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/DomainResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/DomainResponse.java index e018b1a0f724..453c6b229e97 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/DomainResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/DomainResponse.java @@ -16,21 +16,21 @@ // under the License. 
package org.apache.cloudstack.api.response; -import com.google.gson.annotations.SerializedName; +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.Set; import org.apache.cloudstack.api.ApiConstants; -import org.apache.cloudstack.api.BaseResponseWithAnnotations; +import org.apache.cloudstack.api.BaseResponseWithTagInformation; import org.apache.cloudstack.api.EntityReference; import com.cloud.domain.Domain; import com.cloud.serializer.Param; - -import java.util.Date; -import java.util.List; -import java.util.Map; +import com.google.gson.annotations.SerializedName; @EntityReference(value = Domain.class) -public class DomainResponse extends BaseResponseWithAnnotations implements ResourceLimitAndCountResponse, SetResourceIconResponse { +public class DomainResponse extends BaseResponseWithTagInformation implements ResourceLimitAndCountResponse, SetResourceIconResponse { @SerializedName(ApiConstants.ID) @Param(description = "The ID of the domain") private String id; @@ -589,4 +589,8 @@ public void setDetails(Map details) { public void setTaggedResourceLimitsAndCounts(List taggedResourceLimitsAndCounts) { this.taggedResources = taggedResourceLimitsAndCounts; } + + public void setTags(Set tags) { + this.tags = tags; + } } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/Site2SiteCustomerGatewayResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/Site2SiteCustomerGatewayResponse.java index 4e5820279a2e..b121ef7ce61e 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/Site2SiteCustomerGatewayResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/Site2SiteCustomerGatewayResponse.java @@ -114,6 +114,14 @@ public class Site2SiteCustomerGatewayResponse extends BaseResponseWithAnnotation @Param(description = "Which IKE Version to use, one of ike (autoselect), IKEv1, or IKEv2. 
Defaults to ike") private String ikeVersion; + @SerializedName(ApiConstants.OBSOLETE_PARAMETERS) + @Param(description = "Contains the list of obsolete/insecure cryptographic parameters that the vpn customer gateway is using.", since = "4.23.0") + private String obsoleteParameters; + + @SerializedName(ApiConstants.EXCLUDED_PARAMETERS) + @Param(description = "Contains the list of excluded/not allowed cryptographic parameters that the vpn customer gateway is using.", since = "4.23.0") + private String excludedParameters; + public void setId(String id) { this.id = id; } @@ -202,4 +210,12 @@ public void setDomainPath(String domainPath) { this.domainPath = domainPath; } + public void setContainsObsoleteParameters(String obsoleteParameters) { + this.obsoleteParameters = obsoleteParameters; + } + + public void setContainsExcludedParameters(String excludedParameters) { + this.excludedParameters = excludedParameters; + } + } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/SslCertResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/SslCertResponse.java index ff2f9562027e..d4add82f666c 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/SslCertResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/SslCertResponse.java @@ -27,8 +27,6 @@ import org.apache.cloudstack.network.tls.SslCert; import com.cloud.serializer.Param; -//import org.apache.cloudstack.api.EntityReference; - @EntityReference(value = SslCert.class) public class SslCertResponse extends BaseResponse { diff --git a/api/src/main/java/org/apache/cloudstack/api/response/SystemVmResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/SystemVmResponse.java index e69f6366742c..a3ed88c27356 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/SystemVmResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/SystemVmResponse.java @@ -126,6 +126,10 @@ public class SystemVmResponse extends BaseResponseWithAnnotations { @Param(description = "The public netmask for the System VM") private String publicNetmask; + @SerializedName("storageip") + @Param(description = "the ip address for the system VM on the storage network") + private String storageIp; + @SerializedName("templateid") @Param(description = "The Template ID for the System VM") private String templateId; @@ -355,6 +359,14 @@ public void setPublicNetmask(String publicNetmask) { this.publicNetmask = publicNetmask; } + public String getStorageIp() { + return storageIp; + } + + public void setStorageIp(String storageIp) { + this.storageIp = storageIp; + } + public String getTemplateId() { return templateId; } diff --git a/api/src/main/java/org/apache/cloudstack/api/response/TrafficTypeResponse.java b/api/src/main/java/org/apache/cloudstack/api/response/TrafficTypeResponse.java index bbf7159a4854..9d00d01eeb88 100644 --- a/api/src/main/java/org/apache/cloudstack/api/response/TrafficTypeResponse.java +++ b/api/src/main/java/org/apache/cloudstack/api/response/TrafficTypeResponse.java @@ -52,13 +52,13 @@ public class TrafficTypeResponse extends BaseResponse { @Param(description = "The Network name label of the physical device dedicated to this traffic on a VMware host") private String vmwareNetworkLabel; - @SerializedName(ApiConstants.HYPERV_NETWORK_LABEL) - @Param(description = "The Network name label of the physical device dedicated to this traffic on a HyperV host") - private String hypervNetworkLabel; + @SerializedName(ApiConstants.VLAN) + @Param(description = "The VLAN id to be used for 
Management traffic by VMware host") + private String vlan; - @SerializedName(ApiConstants.OVM3_NETWORK_LABEL) - @Param(description = "The Network name of the physical device dedicated to this traffic on an OVM3 host") - private String ovm3NetworkLabel; + @SerializedName(ApiConstants.ISOLATION_METHODS) + @Param(description = "isolation methods for the physical network traffic") + private String isolationMethods; @Override public String getObjectId() { @@ -97,18 +97,10 @@ public String getKvmLabel() { return kvmNetworkLabel; } - public String getHypervLabel() { - return hypervNetworkLabel; - } - public void setXenLabel(String xenLabel) { this.xenNetworkLabel = xenLabel; } - public void setHypervLabel(String hypervLabel) { - this.hypervNetworkLabel = hypervLabel; - } - public void setKvmLabel(String kvmLabel) { this.kvmNetworkLabel = kvmLabel; } @@ -121,11 +113,19 @@ public String getVmwareLabel() { return vmwareNetworkLabel; } - public String getOvm3Label() { - return ovm3NetworkLabel; + public String getIsolationMethods() { + return isolationMethods; + } + + public void setIsolationMethods(String isolationMethods) { + this.isolationMethods = isolationMethods; + } + + public String getVlan() { + return vlan; } - public void setOvm3Label(String ovm3Label) { - this.ovm3NetworkLabel = ovm3Label; + public void setVlan(String vlan) { + this.vlan = vlan; } } diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java b/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java index db051313d962..cbaf61405970 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupManager.java @@ -136,6 +136,8 @@ public interface BackupManager extends BackupService, Configurable, PluggableSer */ BackupOffering importBackupOffering(final ImportBackupOfferingCmd cmd); + List getBackupOfferingDomains(final Long offeringId); + /** * List backup offerings * @param ListBackupOfferingsCmd API cmd diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java b/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java index 32a714370dfc..23b8092425d9 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupProvider.java @@ -124,6 +124,10 @@ public interface BackupProvider { */ boolean supportsInstanceFromBackup(); + default boolean supportsMemoryVmSnapshot() { + return true; + } + /** * Returns the backup storage usage (Used, Total) for a backup provider * @param zoneId the zone for which to return metrics diff --git a/api/src/main/java/org/apache/cloudstack/backup/BackupService.java b/api/src/main/java/org/apache/cloudstack/backup/BackupService.java index d4beb629fe0f..3ba2978c0fa5 100644 --- a/api/src/main/java/org/apache/cloudstack/backup/BackupService.java +++ b/api/src/main/java/org/apache/cloudstack/backup/BackupService.java @@ -34,4 +34,11 @@ public interface BackupService { * @return backup provider */ BackupProvider getBackupProvider(final Long zoneId); + + /** + * Find backup provider by name + * @param name backup provider name + * @return backup provider + */ + BackupProvider getBackupProvider(final String name); } diff --git a/api/src/main/java/org/apache/cloudstack/context/CallContext.java b/api/src/main/java/org/apache/cloudstack/context/CallContext.java index 69376e4f6d7d..4cefd7847fdd 100644 --- a/api/src/main/java/org/apache/cloudstack/context/CallContext.java +++ 
b/api/src/main/java/org/apache/cloudstack/context/CallContext.java @@ -180,9 +180,7 @@ protected static CallContext register(User callingUser, Account callingAccount, } s_currentContext.set(callingContext); ThreadContext.push("ctx-" + UuidUtils.first(contextId)); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Registered: " + callingContext); - } + LOGGER.trace("Registered: {}", callingContext); s_currentContextStack.get().push(callingContext); @@ -279,9 +277,7 @@ public static CallContext unregister() { return null; } s_currentContext.remove(); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Unregistered: " + context); - } + LOGGER.trace("Unregistered: {}", context); String contextId = context.getContextId(); String sessionIdOnStack = null; String sessionIdPushedToNDC = "ctx-" + UuidUtils.first(contextId); @@ -289,9 +285,7 @@ public static CallContext unregister() { if (sessionIdOnStack.isEmpty() || sessionIdPushedToNDC.equals(sessionIdOnStack)) { break; } - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Popping from NDC: " + contextId); - } + LOGGER.trace("Popping from NDC: {}", contextId); } Stack stack = s_currentContextStack.get(); diff --git a/api/src/main/java/org/apache/cloudstack/context/LogContext.java b/api/src/main/java/org/apache/cloudstack/context/LogContext.java index c367975aba3b..24b92090e7f8 100644 --- a/api/src/main/java/org/apache/cloudstack/context/LogContext.java +++ b/api/src/main/java/org/apache/cloudstack/context/LogContext.java @@ -136,9 +136,7 @@ protected static LogContext register(User callingUser, Account callingAccount, L } s_currentContext.set(callingContext); ThreadContext.put("logcontextid", UuidUtils.first(contextId)); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Registered for log: " + callingContext); - } + LOGGER.trace("Registered for log: {}", callingContext); return callingContext; } @@ -207,9 +205,7 @@ public static void unregister() { LogContext context = s_currentContext.get(); if (context != null) { s_currentContext.remove(); - if (LOGGER.isTraceEnabled()) { - LOGGER.trace("Unregistered: " + context); - } + LOGGER.trace("Unregistered: {}", context); } ThreadContext.clearMap(); } diff --git a/api/src/test/java/com/cloud/cpu/CPUTest.java b/api/src/test/java/com/cloud/cpu/CPUTest.java index dfedf21864cc..0a059cf9a90a 100644 --- a/api/src/test/java/com/cloud/cpu/CPUTest.java +++ b/api/src/test/java/com/cloud/cpu/CPUTest.java @@ -28,6 +28,7 @@ public void testCPUArchGetType() { assertEquals("i686", CPU.CPUArch.x86.getType()); assertEquals("x86_64", CPU.CPUArch.amd64.getType()); assertEquals("aarch64", CPU.CPUArch.arm64.getType()); + assertEquals("s390x", CPU.CPUArch.s390x.getType()); } @Test @@ -35,6 +36,7 @@ public void testCPUArchGetBits() { assertEquals(32, CPU.CPUArch.x86.getBits()); assertEquals(64, CPU.CPUArch.amd64.getBits()); assertEquals(64, CPU.CPUArch.arm64.getBits()); + assertEquals(64, CPU.CPUArch.s390x.getBits()); } @Test @@ -42,6 +44,7 @@ public void testCPUArchFromTypeWithValidValues() { assertEquals(CPU.CPUArch.x86, CPU.CPUArch.fromType("i686")); assertEquals(CPU.CPUArch.amd64, CPU.CPUArch.fromType("x86_64")); assertEquals(CPU.CPUArch.arm64, CPU.CPUArch.fromType("aarch64")); + assertEquals(CPU.CPUArch.s390x, CPU.CPUArch.fromType("s390x")); } @Test @@ -61,7 +64,7 @@ public void testCPUArchFromTypeWithInvalidValue() { @Test public void testCPUArchGetTypesAsCSV() { - String expectedCSV = "i686,x86_64,aarch64"; + String expectedCSV = "i686,x86_64,aarch64,s390x"; assertEquals(expectedCSV, CPU.CPUArch.getTypesAsCSV()); } } diff --git 
a/api/src/test/java/org/apache/cloudstack/api/command/admin/config/UpdateCfgCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/config/UpdateCfgCmdTest.java new file mode 100644 index 000000000000..51b1cd9e14b7 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/config/UpdateCfgCmdTest.java @@ -0,0 +1,81 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.api.command.admin.config; + +import org.apache.cloudstack.api.response.ConfigurationResponse; +import org.apache.cloudstack.config.Configuration; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.junit.MockitoJUnitRunner; + +import com.cloud.utils.crypt.DBEncryptionUtil; + +@RunWith(MockitoJUnitRunner.class) +public class UpdateCfgCmdTest { + + private UpdateCfgCmd updateCfgCmd; + + private MockedStatic mockedStatic; + + @Before + public void setUp() { + updateCfgCmd = new UpdateCfgCmd(); + mockedStatic = Mockito.mockStatic(DBEncryptionUtil.class); + } + + @After + public void tearDown() { + mockedStatic.close(); + } + + @Test + public void setResponseValueSetsEncryptedValueWhenConfigurationIsEncrypted() { + ConfigurationResponse response = new ConfigurationResponse(); + Configuration cfg = Mockito.mock(Configuration.class); + Mockito.when(cfg.isEncrypted()).thenReturn(true); + Mockito.when(cfg.getValue()).thenReturn("testValue"); + Mockito.when(DBEncryptionUtil.encrypt("testValue")).thenReturn("encryptedValue"); + updateCfgCmd.setResponseValue(response, cfg); + Assert.assertEquals("encryptedValue", response.getValue()); + } + + @Test + public void setResponseValueSetsPlainValueWhenConfigurationIsNotEncrypted() { + ConfigurationResponse response = new ConfigurationResponse(); + Configuration cfg = Mockito.mock(Configuration.class); + Mockito.when(cfg.isEncrypted()).thenReturn(false); + Mockito.when(cfg.getValue()).thenReturn("testValue"); + updateCfgCmd.setResponseValue(response, cfg); + Assert.assertEquals("testValue", response.getValue()); + } + + @Test + public void setResponseValueHandlesNullConfigurationValueGracefully() { + ConfigurationResponse response = new ConfigurationResponse(); + Configuration cfg = Mockito.mock(Configuration.class); + Mockito.when(cfg.isEncrypted()).thenReturn(false); + Mockito.when(cfg.getValue()).thenReturn(null); + updateCfgCmd.setResponseValue(response, cfg); + Assert.assertNull(response.getValue()); + } + +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/admin/volume/ImportVolumeCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/admin/volume/ImportVolumeCmdTest.java index 
a7c41b9271b1..235acb15eeab 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/admin/volume/ImportVolumeCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/admin/volume/ImportVolumeCmdTest.java @@ -46,7 +46,7 @@ public void testImportVolumeCmd() { Long projectId = 5L; long accountId = 6L; - Mockito.when(accountService.finalyzeAccountId(accountName, domainId, projectId, true)).thenReturn(accountId); + Mockito.when(accountService.finalizeAccountId(accountName, domainId, projectId, true)).thenReturn(accountId); ImportVolumeCmd cmd = new ImportVolumeCmd(); ReflectionTestUtils.setField(cmd, "path", path); diff --git a/api/src/test/java/org/apache/cloudstack/api/command/offering/DomainAndZoneIdResolverTest.java b/api/src/test/java/org/apache/cloudstack/api/command/offering/DomainAndZoneIdResolverTest.java new file mode 100644 index 000000000000..e679bbf2d1f1 --- /dev/null +++ b/api/src/test/java/org/apache/cloudstack/api/command/offering/DomainAndZoneIdResolverTest.java @@ -0,0 +1,149 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.api.command.offering; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.lang.reflect.Field; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.function.LongFunction; + +import com.cloud.dc.DataCenter; +import com.cloud.domain.Domain; +import com.cloud.exception.ConcurrentOperationException; +import com.cloud.exception.InsufficientCapacityException; +import com.cloud.exception.InvalidParameterValueException; +import com.cloud.exception.NetworkRuleConflictException; +import com.cloud.exception.ResourceAllocationException; +import com.cloud.exception.ResourceUnavailableException; +import com.cloud.utils.db.EntityManager; +import org.apache.cloudstack.api.BaseCmd; +import org.apache.cloudstack.api.ServerApiException; +import org.junit.Assert; +import org.junit.Test; + +public class DomainAndZoneIdResolverTest { + static class TestCmd extends BaseCmd implements DomainAndZoneIdResolver { + @Override + public void execute() throws ResourceUnavailableException, InsufficientCapacityException, ServerApiException, ConcurrentOperationException, ResourceAllocationException, NetworkRuleConflictException { + // No implementation needed for tests + } + + @Override + public String getCommandName() { + return "test"; + } + + @Override + public long getEntityOwnerId() { + return 1L; + } + } + + private void setEntityMgr(final BaseCmd cmd, final EntityManager entityMgr) throws Exception { + Field f = BaseCmd.class.getDeclaredField("_entityMgr"); + f.setAccessible(true); + f.set(cmd, entityMgr); + } + + @Test + public void resolveDomainIds_usesDefaultProviderWhenEmpty() { + TestCmd cmd = new TestCmd(); + + final LongFunction<List<Long>> defaultsProvider = id -> Arrays.asList(100L, 200L); + + List<Long> result = cmd.resolveDomainIds("", 42L, defaultsProvider, "offering"); + Assert.assertEquals(Arrays.asList(100L, 200L), result); + } + + @Test + public void resolveDomainIds_resolvesValidUuids() throws Exception { + TestCmd cmd = new TestCmd(); + + EntityManager em = mock(EntityManager.class); + setEntityMgr(cmd, em); + + Domain d1 = mock(Domain.class); + when(d1.getId()).thenReturn(10L); + Domain d2 = mock(Domain.class); + when(d2.getId()).thenReturn(20L); + + when(em.findByUuid(Domain.class, "uuid1")).thenReturn(d1); + when(em.findByUuid(Domain.class, "uuid2")).thenReturn(d2); + + List<Long> ids = cmd.resolveDomainIds("uuid1, public, uuid2", null, null, "template"); + Assert.assertEquals(Arrays.asList(10L, 20L), ids); + } + + @Test + public void resolveDomainIds_invalidUuid_throws() throws Exception { + TestCmd cmd = new TestCmd(); + + EntityManager em = mock(EntityManager.class); + setEntityMgr(cmd, em); + + when(em.findByUuid(Domain.class, "bad-uuid")).thenReturn(null); + + Assert.assertThrows(InvalidParameterValueException.class, + () -> cmd.resolveDomainIds("bad-uuid", null, null, "offering")); + } + + @Test + public void resolveZoneIds_usesDefaultProviderWhenEmpty() { + TestCmd cmd = new TestCmd(); + + final LongFunction<List<Long>> defaultsProvider = id -> Collections.singletonList(300L); + + List<Long> result = cmd.resolveZoneIds("", 99L, defaultsProvider, "offering"); + Assert.assertEquals(Collections.singletonList(300L), result); + } + + @Test + public void resolveZoneIds_resolvesValidUuids() throws Exception { + TestCmd cmd = new TestCmd(); + + EntityManager em = mock(EntityManager.class); + setEntityMgr(cmd, em); + + DataCenter z1 = mock(DataCenter.class); + when(z1.getId()).thenReturn(30L); + DataCenter z2 =
mock(DataCenter.class); + when(z2.getId()).thenReturn(40L); + + when(em.findByUuid(DataCenter.class, "zone-1")).thenReturn(z1); + when(em.findByUuid(DataCenter.class, "zone-2")).thenReturn(z2); + + List<Long> ids = cmd.resolveZoneIds("zone-1, all, zone-2", null, null, "service"); + Assert.assertEquals(Arrays.asList(30L, 40L), ids); + } + + @Test + public void resolveZoneIds_invalidUuid_throws() throws Exception { + TestCmd cmd = new TestCmd(); + + EntityManager em = mock(EntityManager.class); + setEntityMgr(cmd, em); + + when(em.findByUuid(DataCenter.class, "bad-zone")).thenReturn(null); + + Assert.assertThrows(InvalidParameterValueException.class, + () -> cmd.resolveZoneIds("bad-zone", null, null, "offering")); + } +} diff --git a/api/src/test/java/org/apache/cloudstack/api/command/test/ScaleVMCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/test/ScaleVMCmdTest.java index 46ff0e14c8f3..1150c40ba488 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/test/ScaleVMCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/test/ScaleVMCmdTest.java @@ -78,10 +78,6 @@ public void testCreateSuccess() { scaleVMCmd._responseGenerator = responseGenerator; UserVmResponse userVmResponse = Mockito.mock(UserVmResponse.class); - //List list = Mockito.mock(UserVmResponse.class); - //list.add(userVmResponse); - //LinkedList mockedList = Mockito.mock(LinkedList.class); - //Mockito.when(mockedList.get(0)).thenReturn(userVmResponse); List list = new LinkedList(); list.add(userVmResponse); diff --git a/api/src/test/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmdTest.java b/api/src/test/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmdTest.java index e9605526f86f..8fac32d8f92f 100644 --- a/api/src/test/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmdTest.java +++ b/api/src/test/java/org/apache/cloudstack/api/command/user/userdata/RegisterUserDataCmdTest.java @@ -97,7 +97,7 @@ public void validateArgsCmd() { ReflectionTestUtils.setField(cmd, "name", "testUserdataName"); ReflectionTestUtils.setField(cmd, "userData", "testUserdata"); - when(_accountService.finalyzeAccountId(ACCOUNT_NAME, DOMAIN_ID, PROJECT_ID, true)).thenReturn(200L); + when(_accountService.finalizeAccountId(ACCOUNT_NAME, DOMAIN_ID, PROJECT_ID, true)).thenReturn(200L); Assert.assertEquals("testUserdataName", cmd.getName()); Assert.assertEquals("testUserdata", cmd.getUserData()); diff --git a/client/bindir/cloud-setup-management.in b/client/bindir/cloud-setup-management.in index 84c87ae2e442..b4fe76cc8d8a 100755 --- a/client/bindir/cloud-setup-management.in +++ b/client/bindir/cloud-setup-management.in @@ -36,6 +36,106 @@ from cloudutils.cloudException import CloudRuntimeException, CloudInternalExcept from cloudutils.globalEnv import globalEnv from cloudutils.serviceConfigServer import cloudManagementConfig from optparse import OptionParser +import urllib.request +import configparser +import hashlib + +SYSTEMVM_TEMPLATES_PATH = "/usr/share/cloudstack-management/templates/systemvm" +SYSTEMVM_TEMPLATES_METADATA_FILE = SYSTEMVM_TEMPLATES_PATH + "/metadata.ini" + +def verify_sha512_checksum(file_path, expected_checksum): + sha512 = hashlib.sha512() + try: + with open(file_path, "rb") as f: + for chunk in iter(lambda: f.read(8192), b""): + sha512.update(chunk) + return sha512.hexdigest().lower() == expected_checksum.lower() + except Exception as e: + print(f"Failed to verify checksum for {file_path}: {e}") + return False + +def
download_file(url, dest_path, chunk_size=8 * 1024 * 1024): + """ + Downloads a file from the given URL to the specified destination path in chunks. + """ + try: + with urllib.request.urlopen(url) as response: + total_size = response.length if response.length else None + downloaded = 0 + try: + with open(dest_path, 'wb') as out_file: + while True: + chunk = response.read(chunk_size) + if not chunk: + break + out_file.write(chunk) + downloaded += len(chunk) + if total_size: + print(f"Downloaded {downloaded / (1024 * 1024):.2f}MB of {total_size / (1024 * 1024):.2f}MB", end='\r') + except PermissionError as pe: + print(f"Permission denied: {dest_path}") + raise + print(f"\nDownloaded file from {url} to {dest_path}") + except Exception as e: + print(f"Failed to download file: {e}") + raise + +def download_template_if_needed(template, url, filename, checksum): + dest_path = os.path.join(SYSTEMVM_TEMPLATES_PATH, filename) + if os.path.exists(dest_path): + if checksum and verify_sha512_checksum(dest_path, checksum): + print(f"{template} System VM template already exists at {dest_path} with valid checksum, skipping download.") + return + else: + print(f"{template} System VM template at {dest_path} has invalid or missing checksum, re-downloading...") + else: + print(f"Downloading {template} System VM template from {url} to {dest_path}...") + try: + download_file(url, dest_path) + #After download, verify checksum if provided + if checksum: + if verify_sha512_checksum(dest_path, checksum): + print(f"{template} System VM template downloaded and verified successfully.") + else: + print(f"ERROR: Checksum verification failed for {template} System VM template after download.") + except Exception as e: + print(f"ERROR: Failed to download {template} System VM template: {e}") + +def collect_template_metadata(selected_templates, options): + template_metadata_list = [] + if not os.path.exists(SYSTEMVM_TEMPLATES_METADATA_FILE): + print(f"ERROR: System VM templates metadata file not found at {SYSTEMVM_TEMPLATES_METADATA_FILE}, cannot download templates.") + sys.exit(1) + config = configparser.ConfigParser() + config.read(SYSTEMVM_TEMPLATES_METADATA_FILE) + template_repo_url = None + if options.systemvm_templates_repository: + if "default" in config and "downloadrepository" in config["default"]: + template_repo_url = config["default"]["downloadrepository"].strip() + if not template_repo_url: + print("ERROR: downloadrepository value is empty in metadata file, cannot use --systemvm-template-repository option.") + sys.exit(1) + for template in selected_templates: + if template in config: + url = config[template].get("downloadurl") + filename = config[template].get("filename") + checksum = config[template].get("checksum") + if url and filename: + if template_repo_url: + url = url.replace(template_repo_url, options.systemvm_templates_repository) + template_metadata_list.append({ + "template": template, + "url": url, + "filename": filename, + "checksum": checksum + }) + else: + print(f"ERROR: URL or filename not found for {template} System VM template in metadata.") + sys.exit(1) + else: + print(f"ERROR: No metadata found for {template} System VM template.") + sys.exit(1) + return template_metadata_list if __name__ == '__main__': initLoging("@MSLOGDIR@/setupManagement.log") @@ -45,6 +145,16 @@ if __name__ == '__main__': parser.add_option("--https", action="store_true", dest="https", help="Enable HTTPs connection of management server") parser.add_option("--tomcat7", action="store_true", dest="tomcat7", help="Depreciated 
option, don't use it") parser.add_option("--no-start", action="store_true", dest="nostart", help="Do not start management server after successful configuration") + parser.add_option( + "--systemvm-templates", + dest="systemvm_templates", + help="Specify System VM templates to download: all, kvm-aarch64, kvm-x86_64, xenserver, vmware or comma-separated list of hypervisor combinations (e.g., kvm-x86_64,xenserver). Default is kvm-x86_64.", + ) + parser.add_option( + "--systemvm-templates-repository", + dest="systemvm_templates_repository", + help="Specify the URL to download System VM templates from." + ) (options, args) = parser.parse_args() if options.https: glbEnv.svrMode = "HttpsServer" @@ -53,6 +163,34 @@ if __name__ == '__main__': if options.nostart: glbEnv.noStart = True + available_templates = ["kvm-aarch64", "kvm-x86_64", "xenserver", "vmware"] + templates_arg = options.systemvm_templates + + selected_templates = ["kvm-x86_64"] + if templates_arg: + templates_list = [t.strip().lower() for t in templates_arg.split(",")] + if "all" in templates_list: + if len(templates_list) > 1: + print("WARNING: 'all' specified for System VM templates, ignoring other specified templates.") + selected_templates = available_templates + else: + invalid_templates = [] + for t in templates_list: + if t in available_templates: + if t not in selected_templates: + selected_templates.append(t) + else: + if t not in invalid_templates: + invalid_templates.append(t) + if invalid_templates: + print(f"ERROR: Invalid System VM template names provided: {', '.join(invalid_templates)}") + sys.exit(1) + print(f"Selected systemvm templates to download: {', '.join(selected_templates) if selected_templates else 'None'}") + + template_metadata_list = [] + if selected_templates: + template_metadata_list = collect_template_metadata(selected_templates, options) + glbEnv.mode = "Server" print("Starting to configure CloudStack Management Server:") @@ -74,3 +212,6 @@ if __name__ == '__main__': syscfg.restore() except: pass + + for meta in template_metadata_list: + download_template_if_needed(meta["template"], meta["url"], meta["filename"], meta["checksum"]) diff --git a/client/conf/server.properties.in b/client/conf/server.properties.in index 5958486b4dff..7c5e3f925b08 100644 --- a/client/conf/server.properties.in +++ b/client/conf/server.properties.in @@ -62,3 +62,8 @@ extensions.deployment.mode=@EXTENSIONSDEPLOYMENTMODE@ # Thread pool configuration #threads.min=10 #threads.max=500 + +# The URL prefix for the system VM templates repository. When downloading system VM templates, the server replaces the +# `downloadrepository` key value from the metadata file in template URLs. If not specified, the original template URL +# will be used for download. 
+# system.vm.templates.download.repository=http://download.cloudstack.org/systemvm/ diff --git a/client/pom.xml b/client/pom.xml index 94d844be3c42..bb48625eb823 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -25,7 +25,7 @@ org.apache.cloudstack cloudstack - 4.22.1.0-SNAPSHOT + 4.23.0.0-SNAPSHOT @@ -316,11 +316,6 @@ cloud-plugin-hypervisor-baremetal ${project.version} - - org.apache.cloudstack - cloud-plugin-hypervisor-ucs - ${project.version} - org.apache.cloudstack cloud-plugin-hypervisor-kvm @@ -332,11 +327,6 @@ - - org.apache.cloudstack - cloud-plugin-hypervisor-hyperv - ${project.version} - org.apache.cloudstack cloud-plugin-hypervisor-external diff --git a/core/pom.xml b/core/pom.xml index 1bf7c28674d6..f91a330b7ba9 100644 --- a/core/pom.xml +++ b/core/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloudstack - 4.22.1.0-SNAPSHOT + 4.23.0.0-SNAPSHOT diff --git a/core/src/main/java/com/cloud/network/HAProxyConfigurator.java b/core/src/main/java/com/cloud/network/HAProxyConfigurator.java index 7736bea3cdaf..128652fc64fa 100644 --- a/core/src/main/java/com/cloud/network/HAProxyConfigurator.java +++ b/core/src/main/java/com/cloud/network/HAProxyConfigurator.java @@ -629,9 +629,6 @@ public String[] generateConfiguration(final LoadBalancerConfigCommand lbCmd) { } } result.addAll(gSection); - // TODO decide under what circumstances these options are needed - // result.add("\tnokqueue"); - // result.add("\tnopoll"); result.add(blankLine); final List dSection = Arrays.asList(defaultsSection); diff --git a/core/src/test/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResourceTest.java b/core/src/test/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResourceTest.java index 201242564ba6..4196587cc3f2 100644 --- a/core/src/test/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResourceTest.java +++ b/core/src/test/java/com/cloud/agent/resource/virtualnetwork/VirtualRoutingResourceTest.java @@ -417,8 +417,6 @@ private void verifyArgs(final SetNetworkACLCommand cmd, final String script, fin // FIXME Check the json content assertEquals(VRScripts.UPDATE_CONFIG, script); assertEquals(VRScripts.NETWORK_ACL_CONFIG, args); - // assertEquals(args, " -d eth3 -M 01:23:45:67:89:AB -i 192.168.1.1 -m 24 -a Egress:ALL:0:0:192.168.0.1/24-192.168.0.2/24:ACCEPT:," + - // "Ingress:ICMP:0:0:192.168.0.1/24-192.168.0.2/24:DROP:,Ingress:TCP:20:80:192.168.0.1/24-192.168.0.2/24:ACCEPT:,"); break; case 2: assertEquals(VRScripts.UPDATE_CONFIG, script); @@ -464,8 +462,6 @@ protected SetupGuestNetworkCommand generateSetupGuestNetworkCommand() { private void verifyArgs(final SetupGuestNetworkCommand cmd, final String script, final String args) { // TODO Check the contents of the json file - //assertEquals(script, VRScripts.VPC_GUEST_NETWORK); - //assertEquals(args, " -C -M 01:23:45:67:89:AB -d eth4 -i 10.1.1.2 -g 10.1.1.1 -m 24 -n 10.1.1.0 -s 8.8.8.8,8.8.4.4 -e cloud.test"); } @Test diff --git a/debian/changelog b/debian/changelog index 6d288afc4db0..02251137e9d0 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,12 +1,12 @@ -cloudstack (4.22.1.0-SNAPSHOT) unstable; urgency=low +cloudstack (4.23.0.0-SNAPSHOT) unstable; urgency=low - * Update the version to 4.22.1.0-SNAPSHOT + * Update the version to 4.23.0.0-SNAPSHOT -- the Apache CloudStack project Thu, 30 Oct 2025 19:23:55 +0530 -cloudstack (4.22.1.0-SNAPSHOT-SNAPSHOT) unstable; urgency=low +cloudstack (4.23.0.0-SNAPSHOT-SNAPSHOT) unstable; urgency=low - * Update the version to 4.22.1.0-SNAPSHOT-SNAPSHOT + * Update the version to 
4.23.0.0-SNAPSHOT-SNAPSHOT -- the Apache CloudStack project Thu, Aug 28 11:58:36 2025 +0530 diff --git a/developer/pom.xml b/developer/pom.xml index de6a8ef3d10a..e2fd782fd25f 100644 --- a/developer/pom.xml +++ b/developer/pom.xml @@ -25,7 +25,7 @@ org.apache.cloudstack cloudstack - 4.22.1.0-SNAPSHOT + 4.23.0.0-SNAPSHOT diff --git a/engine/api/pom.xml b/engine/api/pom.xml index 2f7e15aaab05..cb50ef0cd46b 100644 --- a/engine/api/pom.xml +++ b/engine/api/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-engine - 4.22.1.0-SNAPSHOT + 4.23.0.0-SNAPSHOT ../pom.xml diff --git a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java index 168822c21ebc..6f8c46304567 100644 --- a/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java +++ b/engine/api/src/main/java/org/apache/cloudstack/engine/orchestration/service/VolumeOrchestrationService.java @@ -90,11 +90,11 @@ public interface VolumeOrchestrationService { "volume.allocation.algorithm", "Advanced", "random", - "Order in which storage pool within a cluster will be considered for volume allocation. The value can be 'random', 'firstfit', 'userdispersing', 'userconcentratedpod_random', 'userconcentratedpod_firstfit', or 'firstfitleastconsumed'.", + "Order in which storage pool within a cluster will be considered for volume allocation. The value can be 'random', 'firstfit', 'userdispersing', or 'firstfitleastconsumed'.", true, ConfigKey.Scope.Global, null, null, null, null, null, ConfigKey.Kind.Select, - "random,firstfit,userdispersing,userconcentratedpod_random,userconcentratedpod_firstfit,firstfitleastconsumed"); + "random,firstfit,userdispersing,firstfitleastconsumed"); VolumeInfo moveVolume(VolumeInfo volume, long destPoolDcId, Long destPoolPodId, Long destPoolClusterId, HypervisorType dataDiskHyperType) throws ConcurrentOperationException, StorageUnavailableException; diff --git a/engine/components-api/pom.xml b/engine/components-api/pom.xml index 8caf8ccbff69..49d41d36f83d 100644 --- a/engine/components-api/pom.xml +++ b/engine/components-api/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-engine - 4.22.1.0-SNAPSHOT + 4.23.0.0-SNAPSHOT ../pom.xml diff --git a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java index 3e3901cb2933..99504d4bc458 100644 --- a/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java +++ b/engine/components-api/src/main/java/com/cloud/storage/StorageManager.java @@ -234,7 +234,7 @@ public interface StorageManager extends StorageService { /** * should we execute in sequence not involving any storages? 
- * @return tru if commands should execute in sequence + * @return true if commands should execute in sequence */ static boolean shouldExecuteInSequenceOnVmware() { return shouldExecuteInSequenceOnVmware(null, null); diff --git a/engine/components-api/src/main/java/com/cloud/vm/VmWorkSerializer.java b/engine/components-api/src/main/java/com/cloud/vm/VmWorkSerializer.java index bd6b52d261fa..e4fdc0c4f375 100644 --- a/engine/components-api/src/main/java/com/cloud/vm/VmWorkSerializer.java +++ b/engine/components-api/src/main/java/com/cloud/vm/VmWorkSerializer.java @@ -61,7 +61,6 @@ public static String serialize(VmWork work) { // use java binary serialization instead // return JobSerializerHelper.toObjectSerializedString(work); - // return s_gson.toJson(work); } public static T deserialize(Class clazz, String workInJsonText) { @@ -69,6 +68,5 @@ public static T deserialize(Class clazz, String workInJson // use java binary serialization instead // return (T)JobSerializerHelper.fromObjectSerializedString(workInJsonText); - // return (T)s_gson.fromJson(workInJsonText, clazz); } } diff --git a/engine/components-api/src/main/java/com/cloud/vm/snapshot/VMSnapshotManager.java b/engine/components-api/src/main/java/com/cloud/vm/snapshot/VMSnapshotManager.java index 997b413c099c..6478469f190c 100644 --- a/engine/components-api/src/main/java/com/cloud/vm/snapshot/VMSnapshotManager.java +++ b/engine/components-api/src/main/java/com/cloud/vm/snapshot/VMSnapshotManager.java @@ -42,7 +42,7 @@ public interface VMSnapshotManager extends VMSnapshotService, Manager { boolean deleteAllVMSnapshots(long id, VMSnapshot.Type type); /** - * Sync VM snapshot state when VM snapshot in reverting or snapshoting or expunging state + * Sync VM snapshot state when VM snapshot in reverting or snapshotting or expunging state * Used for fullsync after agent connects * * @param vm, the VM in question diff --git a/engine/orchestration/pom.xml b/engine/orchestration/pom.xml index cd5578d245ca..a8e7001baf8a 100755 --- a/engine/orchestration/pom.xml +++ b/engine/orchestration/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-engine - 4.22.1.0-SNAPSHOT + 4.23.0.0-SNAPSHOT ../pom.xml @@ -78,6 +78,12 @@ cloud-plugin-hypervisor-external ${project.version} + + org.apache.cloudstack + cloud-api + 4.23.0.0-SNAPSHOT + compile + diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java index 3d398ca5dd95..439bdf92ddd7 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/AgentManagerImpl.java @@ -1652,7 +1652,6 @@ protected void processRequest(final Link link, final Request request) { final String reason = shutdown.getReason(); logger.info("Host {} has informed us that it is shutting down with reason {} and detail {}", attache, reason, shutdown.getDetail()); if (reason.equals(ShutdownCommand.Update)) { - // disconnectWithoutInvestigation(attache, Event.UpdateNeeded); throw new CloudRuntimeException("Agent update not implemented"); } else if (reason.equals(ShutdownCommand.Requested)) { disconnectWithoutInvestigation(attache, Event.ShutdownRequested); @@ -1753,7 +1752,6 @@ protected void doTask(final Task task) throws TaskExecutionException { } } catch (final UnsupportedVersionException e) { logger.warn(e.getMessage()); - // upgradeAgent(task.getLink(), data, e.getReason()); } catch (final ClassNotFoundException e) { 
final String message = String.format("Exception occurred when executing tasks! Error '%s'", e.getMessage()); logger.error(message); diff --git a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java index ffc993645adc..cfa0949883fd 100644 --- a/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/agent/manager/ClusteredAgentManagerImpl.java @@ -965,7 +965,6 @@ protected void runInContext() { synchronized (_agentToTransferIds) { if (!_agentToTransferIds.isEmpty()) { logger.debug("Found {} agents to transfer", _agentToTransferIds.size()); - // for (Long hostId : _agentToTransferIds) { for (final Iterator iterator = _agentToTransferIds.iterator(); iterator.hasNext(); ) { final Long hostId = iterator.next(); final AgentAttache attache = findAttache(hostId); diff --git a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java index 86f456306110..20b7521d8dc1 100755 --- a/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java +++ b/engine/orchestration/src/main/java/com/cloud/vm/VirtualMachineManagerImpl.java @@ -2010,7 +2010,7 @@ public boolean getExecuteInSequence(final HypervisorType hypervisorType) { return ExecuteInSequence.value(); } - if (Set.of(HypervisorType.KVM, HypervisorType.XenServer, HypervisorType.Hyperv, HypervisorType.LXC).contains(hypervisorType)) { + if (Set.of(HypervisorType.KVM, HypervisorType.XenServer, HypervisorType.LXC).contains(hypervisorType)) { return false; } else if (hypervisorType.equals(HypervisorType.VMware)) { return StorageManager.shouldExecuteInSequenceOnVmware(); @@ -5406,8 +5406,7 @@ private void handlePowerOffReportWithNoPendingJobsOnVM(final VMInstanceVO vm) { , vm, vm.getState(), vm.getPowerState()); if((HighAvailabilityManager.ForceHA.value() || vm.isHaEnabled()) && vm.getState() == State.Running && HaVmRestartHostUp.value() - && vm.getHypervisorType() != HypervisorType.VMware - && vm.getHypervisorType() != HypervisorType.Hyperv) { + && vm.getHypervisorType() != HypervisorType.VMware) { logger.info("Detected out-of-band stop of a HA enabled VM {}, will schedule restart.", vm); if (!_haMgr.hasPendingHaWork(vm.getId())) { _haMgr.scheduleRestart(vm, true); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java index 2ad8d15d0b71..7f6571becc83 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/datacenter/entity/api/db/dao/EngineHostDaoImpl.java @@ -213,7 +213,6 @@ public EngineHostDaoImpl() { SequenceSearch = createSearchBuilder(); SequenceSearch.and("id", SequenceSearch.entity().getId(), SearchCriteria.Op.EQ); - // SequenceSearch.addRetrieve("sequence", SequenceSearch.entity().getSequence()); SequenceSearch.done(); DirectlyConnectedSearch = createSearchBuilder(); diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java index 
af8ff83396db..c3c480693db6 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java @@ -1156,8 +1156,6 @@ private ImageFormat getSupportedImageFormatForCluster(HypervisorType hyperType) return ImageFormat.OVA; } else if (hyperType == HypervisorType.Ovm) { return ImageFormat.RAW; - } else if (hyperType == HypervisorType.Hyperv) { - return ImageFormat.VHDX; } else { return null; } @@ -1165,15 +1163,7 @@ private ImageFormat getSupportedImageFormatForCluster(HypervisorType hyperType) private boolean isSupportedImageFormatForCluster(VolumeInfo volume, HypervisorType rootDiskHyperType) { ImageFormat volumeFormat = volume.getFormat(); - if (rootDiskHyperType == HypervisorType.Hyperv) { - if (volumeFormat.equals(ImageFormat.VHDX) || volumeFormat.equals(ImageFormat.VHD)) { - return true; - } else { - return false; - } - } else { - return volume.getFormat().equals(getSupportedImageFormatForCluster(rootDiskHyperType)); - } + return volume.getFormat().equals(getSupportedImageFormatForCluster(rootDiskHyperType)); } private VolumeInfo copyVolume(StoragePool rootDiskPool, VolumeInfo volumeInfo, VirtualMachine vm, VirtualMachineTemplate rootDiskTmplt, DataCenter dcVO, Pod pod, DiskOffering diskVO, @@ -1583,12 +1573,8 @@ public void prepareForMigration(VirtualMachineProfile vm, DeployDestination dest vm.addDisk(disk); } - //if (vm.getType() == VirtualMachine.Type.User && vm.getTemplate().getFormat() == ImageFormat.ISO) { if (vm.getType() == VirtualMachine.Type.User) { _tmpltMgr.prepareIsoForVmProfile(vm, dest); - //DataTO dataTO = tmplFactory.getTemplate(vm.getTemplate().getId(), DataStoreRole.Image, vm.getVirtualMachine().getDataCenterId()).getTO(); - //DiskTO iso = new DiskTO(dataTO, 3L, null, Volume.Type.ISO); - //vm.addDisk(iso); } } diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/service/api/ProvisioningServiceImpl.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/service/api/ProvisioningServiceImpl.java index 51e87663919b..ff75aa0cbb65 100644 --- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/service/api/ProvisioningServiceImpl.java +++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/service/api/ProvisioningServiceImpl.java @@ -140,20 +140,12 @@ public List listHosts() { @Override public List listPods() { - /* - * Not in use now, just commented out. 
- */ - //List pods = new ArrayList(); - //pods.add(new PodEntityImpl("pod-uuid-1", "pod1")); - //pods.add(new PodEntityImpl("pod-uuid-2", "pod2")); return null; } @Override public List listZones() { List zones = new ArrayList(); - //zones.add(new ZoneEntityImpl("zone-uuid-1")); - //zones.add(new ZoneEntityImpl("zone-uuid-2")); return zones; } diff --git a/engine/pom.xml b/engine/pom.xml index 821a4f8f54ce..2de84eab85af 100644 --- a/engine/pom.xml +++ b/engine/pom.xml @@ -25,7 +25,7 @@ org.apache.cloudstack cloudstack - 4.22.1.0-SNAPSHOT + 4.23.0.0-SNAPSHOT ../pom.xml diff --git a/engine/schema/pom.xml b/engine/schema/pom.xml index 7d88f649245c..ec12346a8b0e 100644 --- a/engine/schema/pom.xml +++ b/engine/schema/pom.xml @@ -24,7 +24,7 @@ org.apache.cloudstack cloud-engine - 4.22.1.0-SNAPSHOT + 4.23.0.0-SNAPSHOT ../pom.xml @@ -57,6 +57,12 @@ ini4j ${cs.ini.version} + + org.apache.cloudstack + cloud-api + 4.23.0.0-SNAPSHOT + compile + @@ -105,7 +111,6 @@ templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-vmware") templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-xen") templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-ovm") - templateList.add("systemvmtemplate-${csVersion}.${patch}-x86_64-hyperv") File file = new File("./engine/schema/dist/systemvm-templates/sha512sum.txt") def lines = file.readLines() for (template in templateList) { @@ -321,41 +326,5 @@ - - download-hyperv-systemvm-template - - - systemvm-hyperv - - - - - - org.apache.maven.plugins - maven-resources-plugin - ${cs.resources-plugin.version} - - - com.googlecode.maven-download-plugin - download-maven-plugin - 1.6.3 - - - download-hyperv-template - - wget - - - true - ${project.systemvm.template.location}/${cs.version}/systemvmtemplate-${cs.version}.${patch.version}-x86_64-hyperv.vhd.zip - ${basedir}/dist/systemvm-templates/ - ${hyperv.checksum} - - - - - - - diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterVSMMapDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterVSMMapDaoImpl.java index 02a7ac6977c2..76058d213338 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/ClusterVSMMapDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/ClusterVSMMapDaoImpl.java @@ -36,7 +36,6 @@ public class ClusterVSMMapDaoImpl extends GenericDaoBase final SearchBuilder VsmSearch; public ClusterVSMMapDaoImpl() { - //super(); ClusterSearch = createSearchBuilder(); ClusterSearch.and("clusterId", ClusterSearch.entity().getClusterId(), SearchCriteria.Op.EQ); @@ -82,8 +81,6 @@ public boolean remove(Long id) { TransactionLegacy txn = TransactionLegacy.currentTxn(); txn.start(); ClusterVSMMapVO cluster = createForUpdate(); - //cluster.setClusterId(null); - //cluster.setVsmId(null); update(id, cluster); diff --git a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDetailsDaoImpl.java b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDetailsDaoImpl.java index bb03a96d02ee..aec54e20d989 100644 --- a/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDetailsDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/dc/dao/DataCenterDetailsDaoImpl.java @@ -31,7 +31,8 @@ public class DataCenterDetailsDaoImpl extends ResourceDetailsDaoBase DetailSearch; - DataCenterDetailsDaoImpl() { + public DataCenterDetailsDaoImpl() { + super(); DetailSearch = createSearchBuilder(); DetailSearch.and("zoneId", DetailSearch.entity().getResourceId(), SearchCriteria.Op.EQ); DetailSearch.and("name", DetailSearch.entity().getName(), SearchCriteria.Op.EQ); diff --git 
a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java index 09d9f1d7fbf9..90416d117703 100644 --- a/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/dao/PhysicalNetworkTrafficTypeDaoImpl.java @@ -123,14 +123,8 @@ public String getNetworkTag(long physicalNetworkId, TrafficType trafficType, Hyp sc = vmWareAllFieldsSearch.create(); } else if (hType == HypervisorType.Simulator) { sc = simulatorAllFieldsSearch.create(); - } else if (hType == HypervisorType.Ovm) { - sc = ovmAllFieldsSearch.create(); } else if (hType == HypervisorType.BareMetal || hType == HypervisorType.External) { return null; - } else if (hType == HypervisorType.Hyperv) { - sc = hypervAllFieldsSearch.create(); - } else if (hType == HypervisorType.Ovm3) { - sc = ovm3AllFieldsSearch.create(); } else { assert (false) : "We don't handle this hypervisor type"; return null; diff --git a/engine/schema/src/main/java/com/cloud/network/security/dao/VmRulesetLogDaoImpl.java b/engine/schema/src/main/java/com/cloud/network/security/dao/VmRulesetLogDaoImpl.java index 9a9ca80bce59..7ed0ad0bcc54 100644 --- a/engine/schema/src/main/java/com/cloud/network/security/dao/VmRulesetLogDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/network/security/dao/VmRulesetLogDaoImpl.java @@ -76,7 +76,6 @@ public VmRulesetLogVO findByVmId(long vmId) { @Override public int createOrUpdate(Set workItems) { - //return createOrUpdateUsingBatch(workItems); return createOrUpdateUsingMultiInsert(workItems); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java index 6785c3653290..4c9f906b68a9 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDao.java @@ -94,7 +94,7 @@ public interface VMTemplateDao extends GenericDao, StateDao< List listByParentTemplatetId(long parentTemplatetId); - VMTemplateVO findLatestTemplateByName(String name, CPU.CPUArch arch); + VMTemplateVO findLatestTemplateByName(String name, HypervisorType hypervisorType, CPU.CPUArch arch); List findTemplatesLinkedToUserdata(long userdataId); @@ -103,4 +103,7 @@ public interface VMTemplateDao extends GenericDao, StateDao< List listIdsByTemplateTag(String tag); List listIdsByExtensionId(long extensionId); + + VMTemplateVO findActiveSystemTemplateByHypervisorArchAndUrlPath(HypervisorType hypervisorType, + CPU.CPUArch arch, String urlPathSuffix); } diff --git a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java index 7d46aa9f225d..bcf8b39a291f 100644 --- a/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/storage/dao/VMTemplateDaoImpl.java @@ -100,7 +100,6 @@ public class VMTemplateDaoImpl extends GenericDaoBase implem private SearchBuilder PublicIsoSearch; private SearchBuilder UserIsoSearch; private GenericSearchBuilder CountTemplatesByAccount; - // private SearchBuilder updateStateSearch; private SearchBuilder AllFieldsSearch; protected SearchBuilder ParentTemplateIdSearch; private SearchBuilder InactiveUnremovedTmpltSearch; @@ -246,13 +245,17 @@ public List listReadyTemplates() { @Override - public VMTemplateVO 
findLatestTemplateByName(String name, CPU.CPUArch arch) { + public VMTemplateVO findLatestTemplateByName(String name, HypervisorType hypervisorType, CPU.CPUArch arch) { SearchBuilder sb = createSearchBuilder(); sb.and("name", sb.entity().getName(), SearchCriteria.Op.EQ); + sb.and("hypervisorType", sb.entity().getHypervisorType(), SearchCriteria.Op.EQ); sb.and("arch", sb.entity().getArch(), SearchCriteria.Op.EQ); sb.done(); SearchCriteria sc = sb.create(); sc.setParameters("name", name); + if (hypervisorType != null) { + sc.setParameters("hypervisorType", hypervisorType); + } if (arch != null) { sc.setParameters("arch", arch); } @@ -404,12 +407,6 @@ public boolean configure(String name, Map params) throws Configu CountTemplatesByAccount.and("state", CountTemplatesByAccount.entity().getState(), SearchCriteria.Op.EQ); CountTemplatesByAccount.done(); - // updateStateSearch = this.createSearchBuilder(); - // updateStateSearch.and("id", updateStateSearch.entity().getId(), Op.EQ); - // updateStateSearch.and("state", updateStateSearch.entity().getState(), Op.EQ); - // updateStateSearch.and("updatedCount", updateStateSearch.entity().getUpdatedCount(), Op.EQ); - // updateStateSearch.done(); - AllFieldsSearch = createSearchBuilder(); AllFieldsSearch.and("state", AllFieldsSearch.entity().getState(), SearchCriteria.Op.EQ); AllFieldsSearch.and("accountId", AllFieldsSearch.entity().getAccountId(), SearchCriteria.Op.EQ); @@ -857,6 +854,37 @@ public List listIdsByExtensionId(long extensionId) { return customSearch(sc, null); } + @Override + public VMTemplateVO findActiveSystemTemplateByHypervisorArchAndUrlPath(HypervisorType hypervisorType, + CPU.CPUArch arch, String urlPathSuffix) { + if (StringUtils.isBlank(urlPathSuffix)) { + return null; + } + SearchBuilder sb = createSearchBuilder(); + sb.and("templateType", sb.entity().getTemplateType(), SearchCriteria.Op.EQ); + sb.and("hypervisorType", sb.entity().getHypervisorType(), SearchCriteria.Op.EQ); + sb.and("arch", sb.entity().getArch(), SearchCriteria.Op.EQ); + sb.and("urlPathSuffix", sb.entity().getUrl(), SearchCriteria.Op.LIKE); + sb.and("state", sb.entity().getState(), SearchCriteria.Op.EQ); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("templateType", TemplateType.SYSTEM); + if (hypervisorType != null) { + sc.setParameters("hypervisorType", hypervisorType); + } + if (arch != null) { + sc.setParameters("arch", arch); + } + sc.setParameters("urlPathSuffix", "%" + urlPathSuffix); + sc.setParameters("state", VirtualMachineTemplate.State.Active); + Filter filter = new Filter(VMTemplateVO.class, "id", false, null, 1L); + List templates = listBy(sc, filter); + if (CollectionUtils.isNotEmpty(templates)) { + return templates.get(0); + } + return null; + } + @Override public boolean updateState( com.cloud.template.VirtualMachineTemplate.State currentState, diff --git a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java index 296f80f4b5e1..0e784d961b3d 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java @@ -33,11 +33,10 @@ import javax.inject.Inject; -import com.cloud.utils.FileUtil; import org.apache.cloudstack.utils.CloudStackVersion; import org.apache.commons.lang3.StringUtils; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import 
com.cloud.upgrade.dao.DbUpgrade; import com.cloud.upgrade.dao.DbUpgradeSystemVmTemplate; @@ -94,6 +93,7 @@ import com.cloud.upgrade.dao.Upgrade42200to42210; import com.cloud.upgrade.dao.Upgrade420to421; import com.cloud.upgrade.dao.Upgrade421to430; +import com.cloud.upgrade.dao.Upgrade42210to42300; import com.cloud.upgrade.dao.Upgrade430to440; import com.cloud.upgrade.dao.Upgrade431to440; import com.cloud.upgrade.dao.Upgrade432to440; @@ -122,6 +122,7 @@ import com.cloud.upgrade.dao.VersionDaoImpl; import com.cloud.upgrade.dao.VersionVO; import com.cloud.upgrade.dao.VersionVO.Step; +import com.cloud.utils.FileUtil; import com.cloud.utils.component.SystemIntegrityChecker; import com.cloud.utils.crypt.DBEncryptionUtil; import com.cloud.utils.db.GlobalLock; @@ -242,6 +243,7 @@ public DatabaseUpgradeChecker() { .next("4.20.1.0", new Upgrade42010to42100()) .next("4.21.0.0", new Upgrade42100to42200()) .next("4.22.0.0", new Upgrade42200to42210()) + .next("4.22.1.0", new Upgrade42210to42300()) .build(); } @@ -318,20 +320,20 @@ protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVer } protected void executeProcedureScripts() { - LOGGER.info(String.format("Executing Stored Procedure scripts that are under resource directory [%s].", PROCEDURES_DIRECTORY)); + LOGGER.info("Executing Stored Procedure scripts that are under resource directory [{}].", PROCEDURES_DIRECTORY); List filesPathUnderViewsDirectory = FileUtil.getFilesPathsUnderResourceDirectory(PROCEDURES_DIRECTORY); try (TransactionLegacy txn = TransactionLegacy.open("execute-procedure-scripts")) { Connection conn = txn.getConnection(); for (String filePath : filesPathUnderViewsDirectory) { - LOGGER.debug(String.format("Executing PROCEDURE script [%s].", filePath)); + LOGGER.debug("Executing PROCEDURE script [{}].", filePath); InputStream viewScript = Thread.currentThread().getContextClassLoader().getResourceAsStream(filePath); runScript(conn, viewScript); } - LOGGER.info(String.format("Finished execution of PROCEDURE scripts that are under resource directory [%s].", PROCEDURES_DIRECTORY)); + LOGGER.info("Finished execution of PROCEDURE scripts that are under resource directory [{}].", PROCEDURES_DIRECTORY); } catch (SQLException e) { String message = String.format("Unable to execute PROCEDURE scripts due to [%s].", e.getMessage()); LOGGER.error(message, e); @@ -340,7 +342,7 @@ protected void executeProcedureScripts() { } private DbUpgrade[] executeUpgrades(CloudStackVersion dbVersion, CloudStackVersion currentVersion) { - LOGGER.info("Database upgrade must be performed from " + dbVersion + " to " + currentVersion); + LOGGER.info("Database upgrade must be performed from {} to {}", dbVersion, currentVersion); final DbUpgrade[] upgrades = calculateUpgradePath(dbVersion, currentVersion); @@ -353,8 +355,8 @@ private DbUpgrade[] executeUpgrades(CloudStackVersion dbVersion, CloudStackVersi private VersionVO executeUpgrade(DbUpgrade upgrade) { VersionVO version; - LOGGER.debug("Running upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade - .getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion()); + LOGGER.debug("Running upgrade {} to upgrade from {}-{} to {}", upgrade.getClass().getSimpleName(), upgrade.getUpgradableVersionRange()[0], upgrade + .getUpgradableVersionRange()[1], upgrade.getUpgradedVersion()); TransactionLegacy txn = TransactionLegacy.open("Upgrade"); txn.start(); try { @@ -397,8 +399,8 @@ private void 
executeUpgradeCleanup(DbUpgrade upgrade, VersionVO version) { // Run the corresponding '-cleanup.sql' script txn = TransactionLegacy.open("Cleanup"); try { - LOGGER.info("Cleanup upgrade " + upgrade.getClass().getSimpleName() + " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade - .getUpgradableVersionRange()[1] + " to " + upgrade.getUpgradedVersion()); + LOGGER.info("Cleanup upgrade {} to upgrade from {}-{} to {}", upgrade.getClass().getSimpleName(), upgrade.getUpgradableVersionRange()[0], upgrade + .getUpgradableVersionRange()[1], upgrade.getUpgradedVersion()); txn.start(); Connection conn; @@ -413,7 +415,7 @@ private void executeUpgradeCleanup(DbUpgrade upgrade, VersionVO version) { if (scripts != null) { for (InputStream script : scripts) { runScript(conn, script); - LOGGER.debug("Cleanup script " + upgrade.getClass().getSimpleName() + " is executed successfully"); + LOGGER.debug("Cleanup script {} is executed successfully", upgrade.getClass().getSimpleName()); } } txn.commit(); @@ -423,27 +425,27 @@ private void executeUpgradeCleanup(DbUpgrade upgrade, VersionVO version) { version.setUpdated(new Date()); _dao.update(version.getId(), version); txn.commit(); - LOGGER.debug("Upgrade completed for version " + version.getVersion()); + LOGGER.debug("Upgrade completed for version {}", version.getVersion()); } finally { txn.close(); } } protected void executeViewScripts() { - LOGGER.info(String.format("Executing VIEW scripts that are under resource directory [%s].", VIEWS_DIRECTORY)); + LOGGER.info("Executing VIEW scripts that are under resource directory [{}].", VIEWS_DIRECTORY); List filesPathUnderViewsDirectory = FileUtil.getFilesPathsUnderResourceDirectory(VIEWS_DIRECTORY); try (TransactionLegacy txn = TransactionLegacy.open("execute-view-scripts")) { Connection conn = txn.getConnection(); for (String filePath : filesPathUnderViewsDirectory) { - LOGGER.debug(String.format("Executing VIEW script [%s].", filePath)); + LOGGER.debug("Executing VIEW script [{}].", filePath); InputStream viewScript = Thread.currentThread().getContextClassLoader().getResourceAsStream(filePath); runScript(conn, viewScript); } - LOGGER.info(String.format("Finished execution of VIEW scripts that are under resource directory [%s].", VIEWS_DIRECTORY)); + LOGGER.info("Finished execution of VIEW scripts that are under resource directory [{}].", VIEWS_DIRECTORY); } catch (SQLException e) { String message = String.format("Unable to execute VIEW scripts due to [%s].", e.getMessage()); LOGGER.error(message, e); @@ -507,10 +509,10 @@ protected void doUpgrades(GlobalLock lock) { String csVersion = parseSystemVmMetadata(); final CloudStackVersion sysVmVersion = CloudStackVersion.parse(csVersion); final CloudStackVersion currentVersion = CloudStackVersion.parse(currentVersionValue); - SystemVmTemplateRegistration.CS_MAJOR_VERSION = String.valueOf(sysVmVersion.getMajorRelease()) + "." + String.valueOf(sysVmVersion.getMinorRelease()); + SystemVmTemplateRegistration.CS_MAJOR_VERSION = sysVmVersion.getMajorRelease() + "." 
+ sysVmVersion.getMinorRelease(); SystemVmTemplateRegistration.CS_TINY_VERSION = String.valueOf(sysVmVersion.getPatchRelease()); - LOGGER.info("DB version = " + dbVersion + " Code Version = " + currentVersion); + LOGGER.info("DB version = {} Code Version = {}", dbVersion, currentVersion); if (dbVersion.compareTo(currentVersion) > 0) { throw new CloudRuntimeException("Database version " + dbVersion + " is higher than management software version " + currentVersionValue); @@ -583,7 +585,7 @@ private void decryptInit(Connection conn) throws SQLException { ResultSet result = pstmt.executeQuery()) { if (result.next()) { String init = result.getString(1); - LOGGER.info("init = " + DBEncryptionUtil.decrypt(init)); + LOGGER.info("init = {}", DBEncryptionUtil.decrypt(init)); } } } @@ -614,21 +616,11 @@ public String getUpgradedVersion() { return upgradedVersion; } - @Override - public boolean supportsRollingUpgrade() { - return false; - } - @Override public InputStream[] getPrepareScripts() { return new InputStream[0]; } - @Override - public void performDataMigration(Connection conn) { - - } - @Override public InputStream[] getCleanupScripts() { return new InputStream[0]; diff --git a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java index f13fe2c6bc38..6f4d3c22eec6 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/SystemVmTemplateRegistration.java @@ -28,7 +28,6 @@ import java.sql.Date; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -51,6 +50,7 @@ import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.utils.security.DigestHelper; +import org.apache.cloudstack.utils.server.ServerPropertiesUtil; import org.apache.commons.lang3.StringUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -62,6 +62,8 @@ import com.cloud.dc.dao.ClusterDaoImpl; import com.cloud.dc.dao.DataCenterDao; import com.cloud.dc.dao.DataCenterDaoImpl; +import com.cloud.dc.dao.DataCenterDetailsDao; +import com.cloud.dc.dao.DataCenterDetailsDaoImpl; import com.cloud.hypervisor.Hypervisor; import com.cloud.storage.DataStoreRole; import com.cloud.storage.GuestOSVO; @@ -103,14 +105,20 @@ public class SystemVmTemplateRegistration { private static final String METADATA_FILE = TEMPLATES_PATH + METADATA_FILE_NAME; public static final String TEMPORARY_SECONDARY_STORE = "tmp"; private static final String PARTIAL_TEMPLATE_FOLDER = String.format("/template/tmpl/%d/", Account.ACCOUNT_ID_SYSTEM); - private static final String storageScriptsDir = "scripts/storage/secondary"; + protected static final String STORAGE_SCRIPTS_DIR = "scripts/storage/secondary"; private static final Integer OTHER_LINUX_ID = 99; - private static Integer LINUX_12_ID = 363; + protected static Integer LINUX_12_ID = 363; private static final Integer SCRIPT_TIMEOUT = 1800000; private static final Integer LOCK_WAIT_TIMEOUT = 1200; + protected static final String TEMPLATE_DOWNLOAD_URL_KEY = "downloadurl"; + protected static final String TEMPLATES_DOWNLOAD_REPOSITORY_KEY = "downloadrepository"; + protected static final String TEMPLATES_CUSTOM_DOWNLOAD_REPOSITORY_KEY = "system.vm.templates.download.repository"; 
protected static final List DOWNLOADABLE_TEMPLATE_ARCH_TYPES = Arrays.asList( + CPU.CPUArch.amd64, CPU.CPUArch.arm64 ); + protected static final String MINIMUM_SYSTEM_VM_VERSION_KEY = "minreq.sysvmtemplate.version"; + protected static final String DEFAULT_SYSTEM_VM_GUEST_OS_NAME = "Debian GNU/Linux 12 (64-bit)"; public static String CS_MAJOR_VERSION = null; public static String CS_TINY_VERSION = null; @@ -134,7 +142,9 @@ public class SystemVmTemplateRegistration { @Inject ConfigurationDao configurationDao; @Inject - private GuestOSDao guestOSDao; + DataCenterDetailsDao dataCenterDetailsDao; + @Inject + GuestOSDao guestOSDao; private String systemVmTemplateVersion; @@ -142,6 +152,7 @@ public class SystemVmTemplateRegistration { public SystemVmTemplateRegistration() { dataCenterDao = new DataCenterDaoImpl(); + dataCenterDetailsDao = new DataCenterDetailsDaoImpl(); vmTemplateDao = new VMTemplateDaoImpl(); vmTemplateZoneDao = new VMTemplateZoneDaoImpl(); templateDataStoreDao = new BasicTemplateDataStoreDaoImpl(); @@ -155,33 +166,14 @@ public SystemVmTemplateRegistration() { } /** - * Convenience constructor method to use when there is no system VM template change for a new version. + * Convenience constructor method to use when there is no system VM Template change for a new version. */ public SystemVmTemplateRegistration(String systemVmTemplateVersion) { this(); this.systemVmTemplateVersion = systemVmTemplateVersion; } - public static String getMountCommand(String nfsVersion, String device, String dir) { - String cmd = MOUNT_COMMAND_BASE; - if (StringUtils.isNotBlank(nfsVersion)) { - cmd = String.format("%s -o vers=%s", cmd, nfsVersion); - } - return String.format("%s %s %s", cmd, device, dir); - } - - public String getSystemVmTemplateVersion() { - if (StringUtils.isEmpty(systemVmTemplateVersion)) { - return String.format("%s.%s", CS_MAJOR_VERSION, CS_TINY_VERSION); - } - return systemVmTemplateVersion; - } - - public File getTempDownloadDir() { - return tempDownloadDir; - } - - private static class SystemVMTemplateDetails { + protected static class SystemVMTemplateDetails { Long id; String uuid; String name; @@ -312,114 +304,53 @@ public void setUpdated(Date updated) { } } - public static final List> hypervisorList = Arrays.asList( + protected static final List> AVAILABLE_SYSTEM_TEMPLATES_HYPERVISOR_ARCH_LIST = Arrays.asList( new Pair<>(Hypervisor.HypervisorType.KVM, CPU.CPUArch.amd64), new Pair<>(Hypervisor.HypervisorType.KVM, CPU.CPUArch.arm64), - new Pair<>(Hypervisor.HypervisorType.VMware, null), - new Pair<>(Hypervisor.HypervisorType.XenServer, null), - new Pair<>(Hypervisor.HypervisorType.Hyperv, null), - new Pair<>(Hypervisor.HypervisorType.LXC, null), - new Pair<>(Hypervisor.HypervisorType.Ovm3, null) + new Pair<>(Hypervisor.HypervisorType.VMware, CPU.CPUArch.amd64), + new Pair<>(Hypervisor.HypervisorType.XenServer, CPU.CPUArch.amd64), + new Pair<>(Hypervisor.HypervisorType.LXC, CPU.CPUArch.amd64) ); - public static final Map NewTemplateMap = new HashMap<>(); + protected static final List METADATA_TEMPLATE_LIST = new ArrayList<>(); - public static final Map RouterTemplateConfigurationNames = new HashMap<>() { + protected static final Map ROUTER_TEMPLATE_CONFIGURATION_NAMES = new HashMap<>() { { put(Hypervisor.HypervisorType.KVM, "router.template.kvm"); put(Hypervisor.HypervisorType.VMware, "router.template.vmware"); put(Hypervisor.HypervisorType.XenServer, "router.template.xenserver"); - put(Hypervisor.HypervisorType.Hyperv, "router.template.hyperv"); 
put(Hypervisor.HypervisorType.LXC, "router.template.lxc"); - put(Hypervisor.HypervisorType.Ovm3, "router.template.ovm3"); } }; - public static Map hypervisorGuestOsMap = new HashMap<>() { - { - put(Hypervisor.HypervisorType.KVM, LINUX_12_ID); - put(Hypervisor.HypervisorType.XenServer, OTHER_LINUX_ID); - put(Hypervisor.HypervisorType.VMware, OTHER_LINUX_ID); - put(Hypervisor.HypervisorType.Hyperv, LINUX_12_ID); - put(Hypervisor.HypervisorType.LXC, LINUX_12_ID); - put(Hypervisor.HypervisorType.Ovm3, LINUX_12_ID); - } - }; - - public static final Map hypervisorImageFormat = new HashMap() { + protected static final Map HYPERVISOR_IMAGE_FORMAT_MAP = new HashMap<>() { { put(Hypervisor.HypervisorType.KVM, ImageFormat.QCOW2); put(Hypervisor.HypervisorType.XenServer, ImageFormat.VHD); put(Hypervisor.HypervisorType.VMware, ImageFormat.OVA); - put(Hypervisor.HypervisorType.Hyperv, ImageFormat.VHD); put(Hypervisor.HypervisorType.LXC, ImageFormat.QCOW2); - put(Hypervisor.HypervisorType.Ovm3, ImageFormat.RAW); } }; - public boolean validateIfSeeded(TemplateDataStoreVO templDataStoreVO, String url, String path, String nfsVersion) { - String filePath = null; - try { - filePath = Files.createTempDirectory(TEMPORARY_SECONDARY_STORE).toString(); - if (filePath == null) { - throw new CloudRuntimeException("Failed to create temporary directory to mount secondary store"); - } - mountStore(url, filePath, nfsVersion); - int lastIdx = path.lastIndexOf(File.separator); - String partialDirPath = path.substring(0, lastIdx); - String templatePath = filePath + File.separator + partialDirPath; - File templateProps = new File(templatePath + "/template.properties"); - if (templateProps.exists()) { - Pair templateSizes = readTemplatePropertiesSizes(templatePath + "/template.properties"); - updateSeededTemplateDetails(templDataStoreVO.getTemplateId(), templDataStoreVO.getDataStoreId(), - templateSizes.first(), templateSizes.second()); - LOGGER.info("SystemVM Template already seeded, skipping registration"); - return true; - } - LOGGER.info("SystemVM Template not seeded"); - return false; - } catch (Exception e) { - LOGGER.error("Failed to verify if the Template is seeded", e); - throw new CloudRuntimeException("Failed to verify if the Template is seeded", e); - } finally { - unmountStore(filePath); - try { - Files.delete(Path.of(filePath)); - } catch (IOException e) { - LOGGER.error(String.format("Failed to delete temporary directory: %s", filePath)); - } + public static Map hypervisorGuestOsMap = new HashMap<>() { + { + put(Hypervisor.HypervisorType.KVM, LINUX_12_ID); + put(Hypervisor.HypervisorType.XenServer, OTHER_LINUX_ID); + put(Hypervisor.HypervisorType.VMware, OTHER_LINUX_ID); + put(Hypervisor.HypervisorType.LXC, LINUX_12_ID); } + }; + + private static boolean isRunningInTest() { + return "true".equalsIgnoreCase(System.getProperty("test.mode")); } private static String getHypervisorArchLog(Hypervisor.HypervisorType hypervisorType, CPU.CPUArch arch) { StringBuilder sb = new StringBuilder("hypervisor: ").append(hypervisorType.name()); - if (Hypervisor.HypervisorType.KVM.equals(hypervisorType)) { - sb.append(", arch: ").append(arch == null ? CPU.CPUArch.amd64.getType() : arch.getType()); - } + sb.append(", arch: ").append(arch == null ? 
CPU.CPUArch.amd64.getType() : arch.getType()); return sb.toString(); } - protected static String getHypervisorArchKey(Hypervisor.HypervisorType hypervisorType, CPU.CPUArch arch) { - if (Hypervisor.HypervisorType.KVM.equals(hypervisorType)) { - return String.format("%s-%s", hypervisorType.name().toLowerCase(), - arch == null ? CPU.CPUArch.amd64.getType() : arch.getType()); - } - return hypervisorType.name().toLowerCase(); - } - - protected static MetadataTemplateDetails getMetadataTemplateDetails(Hypervisor.HypervisorType hypervisorType, - CPU.CPUArch arch) { - return NewTemplateMap.get(getHypervisorArchKey(hypervisorType, arch)); - } - - public VMTemplateVO getRegisteredTemplate(String templateName, CPU.CPUArch arch) { - return vmTemplateDao.findLatestTemplateByName(templateName, arch); - } - - private static boolean isRunningInTest() { - return "true".equalsIgnoreCase(System.getProperty("test.mode")); - } - /** * Attempts to determine the templates directory path by locating the metadata file. *

@@ -460,7 +391,170 @@ private static String fetchTemplatesPath() { throw new CloudRuntimeException(errMsg); } - private List getEligibleZoneIds() { + protected static void cleanupStore(Long templateId, String filePath) { + String destTempFolder = filePath + PARTIAL_TEMPLATE_FOLDER + templateId; + try { + Files.deleteIfExists(Paths.get(destTempFolder)); + } catch (IOException e) { + LOGGER.error("Failed to cleanup mounted store at: {}", filePath, e); + } + } + + protected static Pair readTemplatePropertiesSizes(String path) { + File tmpFile = new File(path); + Long size = null; + long physicalSize = 0L; + try (FileReader fr = new FileReader(tmpFile); BufferedReader brf = new BufferedReader(fr)) { + String line = null; + while ((line = brf.readLine()) != null) { + if (line.startsWith("size=")) { + physicalSize = Long.parseLong(line.split("=")[1]); + } else if (line.startsWith("virtualsize=")) { + size = Long.parseLong(line.split("=")[1]); + } + if (size == null) { + size = physicalSize; + } + } + } catch (IOException ex) { + LOGGER.warn("Failed to read from template.properties", ex); + } + return new Pair<>(size, physicalSize); + } + + protected static MetadataTemplateDetails getMetadataTemplateDetails(Hypervisor.HypervisorType hypervisorType, + CPU.CPUArch arch) { + return METADATA_TEMPLATE_LIST + .stream() + .filter(x -> Objects.equals(x.getHypervisorType(), hypervisorType) && + Objects.equals(x.getArch(), arch)) + .findFirst() + .orElse(null); + } + + protected static String getMetadataFilePath() { + return METADATA_FILE; + } + + protected static Ini.Section getMetadataSectionForHypervisorAndArch(Ini ini, + Hypervisor.HypervisorType hypervisorType, CPU.CPUArch arch) { + String key = String.format("%s-%s", hypervisorType.name().toLowerCase(), + arch.getType().toLowerCase()); + Ini.Section section = ini.get(key); + if (section == null && !Hypervisor.HypervisorType.KVM.equals(hypervisorType)) { + key = String.format("%s", hypervisorType.name().toLowerCase()); + section = ini.get(key); + } + return section; + } + + protected static String getMountCommand(String nfsVersion, String device, String dir) { + String cmd = MOUNT_COMMAND_BASE; + if (StringUtils.isNotBlank(nfsVersion)) { + cmd = String.format("%s -o vers=%s", cmd, nfsVersion); + } + return String.format("%s %s %s", cmd, device, dir); + } + + /** + * This method parses the metadata file consisting of the system VM Templates information + * @return the version of the system VM Template that is to be used. This is done in order + * to fallback on the latest available version of the system VM Template when there doesn't + * exist a template corresponding to the current code version. 
+ */ + public static String parseMetadataFile() { + String metadataFilePath = getMetadataFilePath(); + String errMsg = String.format("Failed to parse system VM Template metadata file: %s", metadataFilePath); + final Ini ini = new Ini(); + try (FileReader reader = new FileReader(metadataFilePath)) { + ini.load(reader); + } catch (IOException e) { + LOGGER.error(errMsg, e); + throw new CloudRuntimeException(errMsg, e); + } + if (!ini.containsKey("default")) { + errMsg = String.format("%s as unable to default section", errMsg); + LOGGER.error(errMsg); + throw new CloudRuntimeException(errMsg); + } + Ini.Section defaultSection = ini.get("default"); + String defaultDownloadRepository = defaultSection.get(TEMPLATES_DOWNLOAD_REPOSITORY_KEY); + String customDownloadRepository = ServerPropertiesUtil.getProperty(TEMPLATES_CUSTOM_DOWNLOAD_REPOSITORY_KEY); + boolean updateCustomDownloadRepository = StringUtils.isNotBlank(customDownloadRepository) && + StringUtils.isNotBlank(defaultDownloadRepository); + for (Pair hypervisorTypeArchPair : AVAILABLE_SYSTEM_TEMPLATES_HYPERVISOR_ARCH_LIST) { + String key = String.format("%s-%s", hypervisorTypeArchPair.first().name().toLowerCase(), + hypervisorTypeArchPair.second().getType().toLowerCase()); + Ini.Section section = getMetadataSectionForHypervisorAndArch(ini, hypervisorTypeArchPair.first(), + hypervisorTypeArchPair.second()); + if (section == null) { + LOGGER.error("Failed to find details for {} in template metadata file: {}", + getHypervisorArchLog(hypervisorTypeArchPair.first(), hypervisorTypeArchPair.second()), + metadataFilePath); + continue; + } + String url = section.get(TEMPLATE_DOWNLOAD_URL_KEY); + if (StringUtils.isNotBlank(url) && updateCustomDownloadRepository) { + url = url.replaceFirst(defaultDownloadRepository.trim(), + customDownloadRepository.trim()); + LOGGER.debug("Updated download URL for {} using custom repository to {}", key, url); + } + METADATA_TEMPLATE_LIST.add(new MetadataTemplateDetails( + hypervisorTypeArchPair.first(), + section.get("templatename"), + section.get("filename"), + url, + section.get("checksum"), + hypervisorTypeArchPair.second(), + section.get("guestos"))); + } + return defaultSection.get("version").trim(); + } + + public static void mountStore(String storeUrl, String path, String nfsVersion) { + try { + if (storeUrl == null) { + return; + } + URI uri = new URI(UriUtils.encodeURIComponent(storeUrl)); + String host = uri.getHost(); + String mountPath = uri.getPath(); + Script.runSimpleBashScript(getMountCommand(nfsVersion, host + ":" + mountPath, path)); + } catch (Exception e) { + String msg = "NFS Store URL is not in the correct format"; + LOGGER.error(msg, e); + throw new CloudRuntimeException(msg, e); + } + } + + public static void unmountStore(String filePath) { + try { + LOGGER.info("Unmounting store"); + String umountCmd = String.format(UMOUNT_COMMAND, filePath); + Script.runSimpleBashScript(umountCmd); + try { + Files.deleteIfExists(Paths.get(filePath)); + } catch (IOException e) { + LOGGER.error("Failed to cleanup mounted store at: {}", filePath, e); + } + } catch (Exception e) { + String msg = String.format("Failed to unmount store mounted at %s", filePath); + LOGGER.error(msg, e); + throw new CloudRuntimeException(msg, e); + } + } + + protected File getTempDownloadDir() { + return tempDownloadDir; + } + + protected void readTemplateProperties(String path, SystemVMTemplateDetails details) { + Pair templateSizes = readTemplatePropertiesSizes(path); + details.setSize(templateSizes.first()); + 
details.setPhysicalSize(templateSizes.second()); + } + + protected List getEligibleZoneIds() { List zoneIds = new ArrayList<>(); List stores = imageStoreDao.findByProtocol("nfs"); for (ImageStoreVO store : stores) { @@ -484,27 +578,18 @@ protected Pair getNfsStoreInZone(Long zoneId) { return new Pair<>(url, storeId); } - public static void mountStore(String storeUrl, String path, String nfsVersion) { - try { - if (storeUrl == null) { - return; - } - URI uri = new URI(UriUtils.encodeURIComponent(storeUrl)); - String host = uri.getHost(); - String mountPath = uri.getPath(); - Script.runSimpleBashScript(getMountCommand(nfsVersion, host + ":" + mountPath, path)); - } catch (Exception e) { - String msg = "NFS Store URL is not in the correct format"; - LOGGER.error(msg, e); - throw new CloudRuntimeException(msg, e); + protected String getSystemVmTemplateVersion() { + if (StringUtils.isEmpty(systemVmTemplateVersion)) { + return String.format("%s.%s", CS_MAJOR_VERSION, CS_TINY_VERSION); } + return systemVmTemplateVersion; } private VMTemplateVO createTemplateObjectInDB(SystemVMTemplateDetails details) { Long templateId = vmTemplateDao.getNextInSequence(Long.class, "id"); VMTemplateVO template = new VMTemplateVO(); template.setUuid(details.getUuid()); - template.setUniqueName(String.format("routing-%s" , String.valueOf(templateId))); + template.setUniqueName(String.format("routing-%s" , templateId)); template.setName(details.getName()); template.setPublicTemplate(false); template.setFeatured(false); @@ -527,7 +612,7 @@ private VMTemplateVO createTemplateObjectInDB(SystemVMTemplateDetails details) { return template; } - private VMTemplateZoneVO createOrUpdateTemplateZoneEntry(long zoneId, long templateId) { + protected VMTemplateZoneVO createOrUpdateTemplateZoneEntry(long zoneId, long templateId) { VMTemplateZoneVO templateZoneVO = vmTemplateZoneDao.findByZoneTemplate(zoneId, templateId); if (templateZoneVO == null) { templateZoneVO = new VMTemplateZoneVO(zoneId, templateId, new java.util.Date()); @@ -541,33 +626,37 @@ private VMTemplateZoneVO createOrUpdateTemplateZoneEntry(long zoneId, long templ return templateZoneVO; } - private void createCrossZonesTemplateZoneRefEntries(Long templateId) { + protected void createCrossZonesTemplateZoneRefEntries(Long templateId) { List dcs = dataCenterDao.listAll(); for (DataCenterVO dc : dcs) { VMTemplateZoneVO templateZoneVO = createOrUpdateTemplateZoneEntry(dc.getId(), templateId); if (templateZoneVO == null) { - throw new CloudRuntimeException(String.format("Failed to create template_zone_ref record for the systemVM Template (id: %s) and zone: %s", templateId, dc)); + throw new CloudRuntimeException(String.format("Failed to create template-zone record for the system " + + "VM Template (ID : %d) and zone: %s", templateId, dc)); } } } - private void createTemplateStoreRefEntry(SystemVMTemplateDetails details) { - TemplateDataStoreVO templateDataStoreVO = new TemplateDataStoreVO(details.storeId, details.getId(), details.getCreated(), 0, - VMTemplateStorageResourceAssoc.Status.NOT_DOWNLOADED, null, null, null, details.getInstallPath(), details.getUrl()); + protected void createTemplateStoreRefEntry(SystemVMTemplateDetails details) { + TemplateDataStoreVO templateDataStoreVO = new TemplateDataStoreVO(details.getStoreId(), details.getId(), + details.getCreated(), 0, VMTemplateStorageResourceAssoc.Status.NOT_DOWNLOADED, + null, null, null, details.getInstallPath(), details.getUrl()); templateDataStoreVO.setDataStoreRole(DataStoreRole.Image); templateDataStoreVO = 
templateDataStoreDao.persist(templateDataStoreVO); if (templateDataStoreVO == null) { - throw new CloudRuntimeException(String.format("Failed to create template_store_ref record for the systemVM Template for hypervisor: %s", details.getHypervisorType().name())); + throw new CloudRuntimeException(String.format("Failed to create template-store record for the system VM " + + "template (ID : %d) and store (ID: %d)", details.getId(), details.getStoreId())); } } - public void updateTemplateDetails(SystemVMTemplateDetails details) { + protected void updateTemplateDetails(SystemVMTemplateDetails details) { VMTemplateVO template = vmTemplateDao.findById(details.getId()); template.setSize(details.getSize()); template.setState(VirtualMachineTemplate.State.Active); vmTemplateDao.update(template.getId(), template); - TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByStoreTemplate(details.getStoreId(), template.getId()); + TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByStoreTemplate(details.getStoreId(), + template.getId()); templateDataStoreVO.setSize(details.getSize()); templateDataStoreVO.setPhysicalSize(details.getPhysicalSize()); templateDataStoreVO.setDownloadPercent(100); @@ -576,11 +665,11 @@ public void updateTemplateDetails(SystemVMTemplateDetails details) { templateDataStoreVO.setState(ObjectInDataStoreStateMachine.State.Ready); boolean updated = templateDataStoreDao.update(templateDataStoreVO.getId(), templateDataStoreVO); if (!updated) { - throw new CloudRuntimeException("Failed to update template_store_ref entry for registered systemVM Template"); + throw new CloudRuntimeException("Failed to update template-store record for registered system VM Template"); } } - public void updateSeededTemplateDetails(long templateId, long storeId, long size, long physicalSize) { + protected void updateSeededTemplateDetails(long templateId, long storeId, long size, long physicalSize) { VMTemplateVO template = vmTemplateDao.findById(templateId); template.setSize(size); vmTemplateDao.update(template.getId(), template); @@ -591,108 +680,75 @@ public void updateSeededTemplateDetails(long templateId, long storeId, long size templateDataStoreVO.setLastUpdated(new Date(DateUtil.currentGMTTime().getTime())); boolean updated = templateDataStoreDao.update(templateDataStoreVO.getId(), templateDataStoreVO); if (!updated) { - throw new CloudRuntimeException("Failed to update template_store_ref entry for seeded systemVM template"); + throw new CloudRuntimeException("Failed to update template-store record for seeded system VM Template"); } } - public void updateSystemVMEntries(Long templateId, Hypervisor.HypervisorType hypervisorType) { + protected void updateSystemVMEntries(Long templateId, Hypervisor.HypervisorType hypervisorType) { vmInstanceDao.updateSystemVmTemplateId(templateId, hypervisorType); } - private void updateSystemVmTemplateGuestOsId() { - String systemVmGuestOsName = "Debian GNU/Linux 12 (64-bit)"; // default + protected void updateHypervisorGuestOsMap() { try { - GuestOSVO guestOS = guestOSDao.findOneByDisplayName(systemVmGuestOsName); - if (guestOS != null) { - LOGGER.debug("Updating SystemVM Template Guest OS [{}] id", systemVmGuestOsName); - SystemVmTemplateRegistration.LINUX_12_ID = Math.toIntExact(guestOS.getId()); - hypervisorGuestOsMap.put(Hypervisor.HypervisorType.KVM, LINUX_12_ID); - hypervisorGuestOsMap.put(Hypervisor.HypervisorType.Hyperv, LINUX_12_ID); - hypervisorGuestOsMap.put(Hypervisor.HypervisorType.LXC, LINUX_12_ID); - 
hypervisorGuestOsMap.put(Hypervisor.HypervisorType.Ovm3, LINUX_12_ID); + GuestOSVO guestOS = guestOSDao.findOneByDisplayName(DEFAULT_SYSTEM_VM_GUEST_OS_NAME); + if (guestOS == null) { + LOGGER.warn("Couldn't find Guest OS by name [{}] to update system VM Template guest OS ID", + DEFAULT_SYSTEM_VM_GUEST_OS_NAME); + return; } + LOGGER.debug("Updating system VM Template guest OS [{}] ID", DEFAULT_SYSTEM_VM_GUEST_OS_NAME); + SystemVmTemplateRegistration.LINUX_12_ID = Math.toIntExact(guestOS.getId()); + hypervisorGuestOsMap.put(Hypervisor.HypervisorType.KVM, LINUX_12_ID); + hypervisorGuestOsMap.put(Hypervisor.HypervisorType.LXC, LINUX_12_ID); } catch (Exception e) { - LOGGER.warn("Couldn't update SystemVM Template Guest OS id, due to {}", e.getMessage()); + LOGGER.warn("Couldn't update System VM template guest OS ID, due to {}", e.getMessage()); } } - public void updateConfigurationParams(Map configParams) { - for (Map.Entry config : configParams.entrySet()) { - boolean updated = configurationDao.update(config.getKey(), config.getValue()); - if (!updated) { - throw new CloudRuntimeException(String.format("Failed to update configuration parameter %s", config.getKey())); - } + protected void updateConfigurationParams(Hypervisor.HypervisorType hypervisorType, String templateName, Long zoneId) { + String configName = ROUTER_TEMPLATE_CONFIGURATION_NAMES.get(hypervisorType); + boolean updated = configurationDao.update(configName, templateName); + if (!updated) { + throw new CloudRuntimeException(String.format("Failed to update configuration parameter %s", configName)); } - } - - private static Pair readTemplatePropertiesSizes(String path) { - File tmpFile = new File(path); - Long size = null; - Long physicalSize = 0L; - try (FileReader fr = new FileReader(tmpFile); BufferedReader brf = new BufferedReader(fr);) { - String line = null; - while ((line = brf.readLine()) != null) { - if (line.startsWith("size=")) { - physicalSize = Long.parseLong(line.split("=")[1]); - } else if (line.startsWith("virtualsize=")) { - size = Long.parseLong(line.split("=")[1]); - } - if (size == null) { - size = physicalSize; - } - } - } catch (IOException ex) { - LOGGER.warn("Failed to read from template.properties", ex); + if (zoneId != null) { + dataCenterDetailsDao.removeDetail(zoneId, configName); + } + updated = configurationDao.update(MINIMUM_SYSTEM_VM_VERSION_KEY, getSystemVmTemplateVersion()); + if (!updated) { + throw new CloudRuntimeException(String.format("Failed to update configuration parameter %s", configName)); + } + if (zoneId != null) { + dataCenterDetailsDao.removeDetail(zoneId, MINIMUM_SYSTEM_VM_VERSION_KEY); } - return new Pair<>(size, physicalSize); - } - - public static void readTemplateProperties(String path, SystemVMTemplateDetails details) { - Pair templateSizes = readTemplatePropertiesSizes(path); - details.setSize(templateSizes.first()); - details.setPhysicalSize(templateSizes.second()); } - private void updateTemplateTablesOnFailure(long templateId) { + protected void updateTemplateEntriesOnFailure(long templateId) { VMTemplateVO template = vmTemplateDao.createForUpdate(templateId); template.setState(VirtualMachineTemplate.State.Inactive); vmTemplateDao.update(template.getId(), template); vmTemplateDao.remove(templateId); - TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByTemplate(template.getId(), DataStoreRole.Image); - templateDataStoreDao.remove(templateDataStoreVO.getId()); - } - - public static void unmountStore(String filePath) { - try { - LOGGER.info("Unmounting 
store"); - String umountCmd = String.format(UMOUNT_COMMAND, filePath); - Script.runSimpleBashScript(umountCmd); - try { - Files.deleteIfExists(Paths.get(filePath)); - } catch (IOException e) { - LOGGER.error(String.format("Failed to cleanup mounted store at: %s", filePath), e); - } - } catch (Exception e) { - String msg = String.format("Failed to unmount store mounted at %s", filePath); - LOGGER.error(msg, e); - throw new CloudRuntimeException(msg, e); + TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByTemplate(template.getId(), + DataStoreRole.Image); + if (templateDataStoreVO == null) { + return; } + templateDataStoreDao.remove(templateDataStoreVO.getId()); } - private void setupTemplate(String templateName, Hypervisor.HypervisorType hypervisor, CPU.CPUArch arch, - String destTempFolder) throws CloudRuntimeException { - String setupTmpltScript = Script.findScript(storageScriptsDir, "setup-sysvm-tmplt"); + protected void setupTemplateOnStore(String templateName, MetadataTemplateDetails templateDetails, + String destTempFolder) throws CloudRuntimeException { + String setupTmpltScript = Script.findScript(STORAGE_SCRIPTS_DIR, "setup-sysvm-tmplt"); if (setupTmpltScript == null) { - throw new CloudRuntimeException("Unable to find the createtmplt.sh"); + throw new CloudRuntimeException("Unable to find the setup-sysvm-tmplt script"); } Script scr = new Script(setupTmpltScript, SCRIPT_TIMEOUT, LOGGER); scr.add("-u", templateName); - MetadataTemplateDetails templateDetails = NewTemplateMap.get(getHypervisorArchKey(hypervisor, arch)); String filePath = StringUtils.isNotBlank(templateDetails.getDownloadedFilePath()) ? templateDetails.getDownloadedFilePath() : templateDetails.getDefaultFilePath(); scr.add("-f", filePath); - scr.add("-h", hypervisor.name().toLowerCase(Locale.ROOT)); + scr.add("-h", templateDetails.getHypervisorType().name().toLowerCase(Locale.ROOT)); scr.add("-d", destTempFolder); String result = scr.execute(); if (result != null) { @@ -702,17 +758,33 @@ private void setupTemplate(String templateName, Hypervisor.HypervisorType hyperv } } - private Long performTemplateRegistrationOperations(Hypervisor.HypervisorType hypervisor, - String name, CPU.CPUArch arch, String url, String checksum, ImageFormat format, long guestOsId, - Long storeId, Long templateId, String filePath, TemplateDataStoreVO templateDataStoreVO) { + /** + * Register or update a system VM Template record and seed it on the target store. 
+ * + * @param name display name of the template + * @param templateDetails metadata for the template + * @param url download URL of the template + * @param checksum expected checksum of the template file + * @param format image format of the template + * @param guestOsId guest OS id + * @param storeId target image store id + * @param templateId existing template id if present, otherwise {@code null} + * @param filePath temporary mount path for the store + * @param templateDataStoreVO existing template-store mapping; may be {@code null} + * @return the id of the template that was created or updated + */ + protected Long performTemplateRegistrationOperations(String name, MetadataTemplateDetails templateDetails, + String url, String checksum, ImageFormat format, long guestOsId, Long storeId, Long templateId, + String filePath, TemplateDataStoreVO templateDataStoreVO) { String templateName = UUID.randomUUID().toString(); Date created = new Date(DateUtil.currentGMTTime().getTime()); - SystemVMTemplateDetails details = new SystemVMTemplateDetails(templateName, name, created, - url, checksum, format, (int) guestOsId, hypervisor, arch, storeId); + SystemVMTemplateDetails details = new SystemVMTemplateDetails(templateName, name, created, url, checksum, + format, (int) guestOsId, templateDetails.getHypervisorType(), templateDetails.getArch(), storeId); if (templateId == null) { VMTemplateVO template = createTemplateObjectInDB(details); if (template == null) { - throw new CloudRuntimeException(String.format("Failed to register Template for hypervisor: %s", hypervisor.name())); + throw new CloudRuntimeException(String.format("Failed to register Template for hypervisor: %s", + templateDetails.getHypervisorType().name())); } templateId = template.getId(); } @@ -721,153 +793,126 @@ private Long performTemplateRegistrationOperations(Hypervisor.HypervisorType hyp details.setId(templateId); String destTempFolderName = String.valueOf(templateId); String destTempFolder = filePath + PARTIAL_TEMPLATE_FOLDER + destTempFolderName; - details.setInstallPath(PARTIAL_TEMPLATE_FOLDER + destTempFolderName + File.separator + templateName + "." + hypervisorImageFormat.get(hypervisor).getFileExtension()); + details.setInstallPath(String.format("%s%s%s%s.%s", PARTIAL_TEMPLATE_FOLDER, destTempFolderName, + File.separator, templateName, + HYPERVISOR_IMAGE_FORMAT_MAP.get(templateDetails.getHypervisorType()).getFileExtension())); if (templateDataStoreVO == null) { createTemplateStoreRefEntry(details); } - setupTemplate(templateName, hypervisor, arch, destTempFolder); + setupTemplateOnStore(templateName, templateDetails, destTempFolder); readTemplateProperties(destTempFolder + "/template.properties", details); details.setUpdated(new Date(DateUtil.currentGMTTime().getTime())); updateTemplateDetails(details); return templateId; } - public void registerTemplate(Hypervisor.HypervisorType hypervisor, String name, Long storeId, - VMTemplateVO templateVO, TemplateDataStoreVO templateDataStoreVO, String filePath) { + /** + * Add an existing system VM Template to a secondary image store and update related DB entries. 
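+ *
+ * For example (illustrative, mirroring the {@code registerTemplatesForZone} call site in this class):
+ * <pre>{@code
+ * addExistingTemplateToStore(templateVO, templateDetails, templateDataStoreVO, zoneId,
+ *         storeUrlAndId.second(), storeMountPath);
+ * }</pre>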
+ * + * @param templateVO the existing VM template (must not be null) + * @param templateDetails the metadata details of the template to be added + * @param templateDataStoreVO optional existing template-store mapping; may be null + * @param zoneId zone id where the operation is performed + * @param storeId target image store id + * @param filePath temporary mount path for the store + * @throws CloudRuntimeException on failure; the method attempts rollback/cleanup + */ + protected void addExistingTemplateToStore(VMTemplateVO templateVO, MetadataTemplateDetails templateDetails, + TemplateDataStoreVO templateDataStoreVO, long zoneId, Long storeId, String filePath) { try { - performTemplateRegistrationOperations(hypervisor, name, templateVO.getArch(), templateVO.getUrl(), + performTemplateRegistrationOperations(templateVO.getName(), templateDetails, templateVO.getUrl(), templateVO.getChecksum(), templateVO.getFormat(), templateVO.getGuestOSId(), storeId, templateVO.getId(), filePath, templateDataStoreVO); } catch (Exception e) { - String errMsg = String.format("Failed to register Template for hypervisor: %s", hypervisor); + String errMsg = String.format("Failed to add %s to store ID: %d, zone ID: %d", templateVO, storeId, zoneId); LOGGER.error(errMsg, e); - updateTemplateTablesOnFailure(templateVO.getId()); cleanupStore(templateVO.getId(), filePath); throw new CloudRuntimeException(errMsg, e); } } - public void registerTemplateForNonExistingEntries(Hypervisor.HypervisorType hypervisor, CPU.CPUArch arch, - String name, Pair storeUrlAndId, String filePath) { + /** + * Registers a new system VM Template for the given hypervisor/arch when no existing template is present. + * + * @param name the name of the new template + * @param templateDetails the metadata details of the template to be registered + * @param zoneId the zone id for which the new template should be seeded + * @param storeId the store id on which the new template will be seeded + * @param filePath temporary mount path for the store + * @throws CloudRuntimeException on failure; the method attempts rollback/cleanup + */ + protected void registerNewTemplate(String name, MetadataTemplateDetails templateDetails, long zoneId, Long storeId, + String filePath) { Long templateId = null; + Hypervisor.HypervisorType hypervisor = templateDetails.getHypervisorType(); try { - MetadataTemplateDetails templateDetails = getMetadataTemplateDetails(hypervisor, arch); - templateId = performTemplateRegistrationOperations(hypervisor, name, - templateDetails.getArch(), templateDetails.getUrl(), - templateDetails.getChecksum(), hypervisorImageFormat.get(hypervisor), - hypervisorGuestOsMap.get(hypervisor), storeUrlAndId.second(), null, filePath, null); - Map configParams = new HashMap<>(); - configParams.put(RouterTemplateConfigurationNames.get(hypervisor), templateDetails.getName()); - configParams.put("minreq.sysvmtemplate.version", getSystemVmTemplateVersion()); - updateConfigurationParams(configParams); + templateId = performTemplateRegistrationOperations(name, templateDetails, templateDetails.getUrl(), + templateDetails.getChecksum(), HYPERVISOR_IMAGE_FORMAT_MAP.get(hypervisor), + hypervisorGuestOsMap.get(hypervisor), storeId, null, filePath, null); + updateConfigurationParams(hypervisor, name, zoneId); updateSystemVMEntries(templateId, hypervisor); } catch (Exception e) { String errMsg = String.format("Failed to register Template for hypervisor: %s", hypervisor); LOGGER.error(errMsg, e); if (templateId != null) { - 
updateTemplateTablesOnFailure(templateId); + updateTemplateEntriesOnFailure(templateId); cleanupStore(templateId, filePath); } throw new CloudRuntimeException(errMsg, e); } } - protected void validateTemplateFileForHypervisorAndArch(Hypervisor.HypervisorType hypervisor, CPU.CPUArch arch) { + /** + * Validate presence and integrity of metadata and local template file for the given hypervisor/arch. + * + * @param hypervisor target hypervisor type + * @param arch target CPU architecture + * @return validated MetadataTemplateDetails + * @throws CloudRuntimeException if template is not available, missing, or checksum validation fails + */ + protected MetadataTemplateDetails getValidatedTemplateDetailsForHypervisorAndArch( + Hypervisor.HypervisorType hypervisor, CPU.CPUArch arch) { + if (!AVAILABLE_SYSTEM_TEMPLATES_HYPERVISOR_ARCH_LIST.contains(new Pair<>(hypervisor, arch))) { + throw new CloudRuntimeException("No system VM Template available for the given hypervisor and arch"); + } MetadataTemplateDetails templateDetails = getMetadataTemplateDetails(hypervisor, arch); + if (templateDetails == null) { + throw new CloudRuntimeException("No template details found for the given hypervisor and arch"); + } File templateFile = getTemplateFile(templateDetails); if (templateFile == null) { throw new CloudRuntimeException("Failed to find local template file"); } - if (isTemplateFileChecksumDifferent(templateDetails, templateFile)) { + if (templateDetails.isFileChecksumDifferent(templateFile)) { throw new CloudRuntimeException("Checksum failed for local template file"); } - } - - public void validateAndRegisterTemplate(Hypervisor.HypervisorType hypervisor, String name, Long storeId, - VMTemplateVO templateVO, TemplateDataStoreVO templateDataStoreVO, String filePath) { - validateTemplateFileForHypervisorAndArch(hypervisor, templateVO.getArch()); - registerTemplate(hypervisor, name, storeId, templateVO, templateDataStoreVO, filePath); - } - - public void validateAndRegisterTemplateForNonExistingEntries(Hypervisor.HypervisorType hypervisor, - CPU.CPUArch arch, String name, Pair storeUrlAndId, String filePath) { - validateTemplateFileForHypervisorAndArch(hypervisor, arch); - registerTemplateForNonExistingEntries(hypervisor, arch, name, storeUrlAndId, filePath); - } - - protected static String getMetadataFilePath() { - return METADATA_FILE; + return templateDetails; } /** - * This method parses the metadata file consisting of the systemVM templates information - * @return the version of the systemvm template that is to be used. This is done in order - * to fallback on the latest available version of the systemVM template when there doesn't - * exist a template corresponding to the current code version. + * Return the local template file. Downloads it if not present locally and url is present. 
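+ *
+ * A typical use, as in {@code getValidatedTemplateDetailsForHypervisorAndArch} (sketch only):
+ * <pre>{@code
+ * File templateFile = getTemplateFile(templateDetails);
+ * if (templateFile == null) {
+ *     throw new CloudRuntimeException("Failed to find local template file");
+ * }
+ * }</pre>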
+ * + * @param templateDetails template metadata; may set `downloadedFilePath` + * @return the template {@code File} on disk, or {@code null} if not found/downloaded */ - public static String parseMetadataFile() { - String metadataFilePath = getMetadataFilePath(); - String errMsg = String.format("Failed to parse systemVM Template metadata file: %s", metadataFilePath); - final Ini ini = new Ini(); - try (FileReader reader = new FileReader(metadataFilePath)) { - ini.load(reader); - } catch (IOException e) { - LOGGER.error(errMsg, e); - throw new CloudRuntimeException(errMsg, e); - } - if (!ini.containsKey("default")) { - errMsg = String.format("%s as unable to default section", errMsg); - LOGGER.error(errMsg); - throw new CloudRuntimeException(errMsg); - } - for (Pair hypervisorType : hypervisorList) { - String key = getHypervisorArchKey(hypervisorType.first(), hypervisorType.second()); - Ini.Section section = ini.get(key); - if (section == null) { - LOGGER.error("Failed to find details for {} in template metadata file: {}", - key, metadataFilePath); - continue; - } - NewTemplateMap.put(key, new MetadataTemplateDetails( - hypervisorType.first(), - section.get("templatename"), - section.get("filename"), - section.get("downloadurl"), - section.get("checksum"), - hypervisorType.second(), - section.get("guestos"))); - } - Ini.Section defaultSection = ini.get("default"); - return defaultSection.get("version").trim(); - } - - - private static void cleanupStore(Long templateId, String filePath) { - String destTempFolder = filePath + PARTIAL_TEMPLATE_FOLDER + String.valueOf(templateId); - try { - Files.deleteIfExists(Paths.get(destTempFolder)); - } catch (IOException e) { - LOGGER.error(String.format("Failed to cleanup mounted store at: %s", filePath), e); - } - } - protected File getTemplateFile(MetadataTemplateDetails templateDetails) { File templateFile = new File(templateDetails.getDefaultFilePath()); if (templateFile.exists()) { return templateFile; } LOGGER.debug("{} is not present", templateFile.getAbsolutePath()); - if (DOWNLOADABLE_TEMPLATE_ARCH_TYPES.contains(templateDetails.getArch()) && - StringUtils.isNotBlank(templateDetails.getUrl())) { + if (StringUtils.isNotBlank(templateDetails.getUrl())) { LOGGER.debug("Downloading the template file {} for {}", templateDetails.getUrl(), templateDetails.getHypervisorArchLog()); Path path = Path.of(TEMPLATES_PATH); if (!Files.isWritable(path)) { - templateFile = new File(tempDownloadDir, templateDetails.getFilename()); + templateFile = new File(getTempDownloadDir(), templateDetails.getFilename()); } if (!templateFile.exists() && !HttpUtils.downloadFileWithProgress(templateDetails.getUrl(), templateFile.getAbsolutePath(), LOGGER)) { + LOGGER.error("Failed to download template for {} using url: {}", + templateDetails.getHypervisorArchLog(), templateDetails.getUrl()); return null; } templateDetails.setDownloadedFilePath(templateFile.getAbsolutePath()); @@ -875,32 +920,27 @@ protected File getTemplateFile(MetadataTemplateDetails templateDetails) { return templateFile; } - protected boolean isTemplateFileChecksumDifferent(MetadataTemplateDetails templateDetails, File templateFile) { - String templateChecksum = DigestHelper.calculateChecksum(templateFile); - if (!templateChecksum.equals(templateDetails.getChecksum())) { - LOGGER.error("Checksum {} for file {} does not match checksum {} from metadata", - templateChecksum, templateFile, templateDetails.getChecksum()); - return true; - } - return false; - } - - protected void validateTemplates(List> 
hypervisorsArchInUse) { + /** + * Validate that templates for the provided hypervisor/architecture pairs which are in use and are valid. + * If a template is missing or validation fails for any required pair, a + * {@link CloudRuntimeException} is thrown to abort the upgrade. If system VM Template for a hypervisor/arch is + * not considered available then validation is skipped for that pair. + * + * @param hypervisorArchList list of hypervisor/architecture pairs to validate + */ + protected void validateTemplates(List> hypervisorArchList) { boolean templatesFound = true; - for (Pair hypervisorArch : hypervisorsArchInUse) { - MetadataTemplateDetails matchedTemplate = getMetadataTemplateDetails(hypervisorArch.first(), - hypervisorArch.second()); - if (matchedTemplate == null) { - templatesFound = false; - break; - } - File tempFile = getTemplateFile(matchedTemplate); - if (tempFile == null) { - LOGGER.warn("Failed to download template for {}, moving ahead", - matchedTemplate.getHypervisorArchLog()); + for (Pair hypervisorArch : hypervisorArchList) { + if (!AVAILABLE_SYSTEM_TEMPLATES_HYPERVISOR_ARCH_LIST.contains(hypervisorArch)) { + LOGGER.info("No system VM Template available for {}. Skipping validation.", + getHypervisorArchLog(hypervisorArch.first(), hypervisorArch.second())); continue; } - if (isTemplateFileChecksumDifferent(matchedTemplate, tempFile)) { + try { + getValidatedTemplateDetailsForHypervisorAndArch(hypervisorArch.first(), hypervisorArch.second()); + } catch (CloudRuntimeException e) { + LOGGER.error("Validation failed for {}: {}", + getHypervisorArchLog(hypervisorArch.first(), hypervisorArch.second()), e.getMessage()); templatesFound = false; break; } @@ -912,10 +952,19 @@ protected void validateTemplates(List storeUrlAndId = getNfsStoreInZone(zoneId); String nfsVersion = getNfsVersion(storeUrlAndId.second()); - mountStore(storeUrlAndId.first(), filePath, nfsVersion); + mountStore(storeUrlAndId.first(), storeMountPath, nfsVersion); List> hypervisorArchList = clusterDao.listDistinctHypervisorsAndArchExcludingExternalType(zoneId); for (Pair hypervisorArch : hypervisorArchList) { @@ -925,7 +974,8 @@ protected void registerTemplatesForZone(long zoneId, String filePath) { if (templateDetails == null) { continue; } - VMTemplateVO templateVO = getRegisteredTemplate(templateDetails.getName(), templateDetails.getArch()); + VMTemplateVO templateVO = getRegisteredTemplate(templateDetails.getName(), + templateDetails.getHypervisorType(), templateDetails.getArch(), templateDetails.getUrl()); if (templateVO != null) { TemplateDataStoreVO templateDataStoreVO = templateDataStoreDao.findByStoreTemplate(storeUrlAndId.second(), templateVO.getId()); @@ -935,22 +985,22 @@ protected void registerTemplatesForZone(long zoneId, String filePath) { continue; } } - registerTemplate(hypervisorType, templateDetails.getName(), storeUrlAndId.second(), templateVO, - templateDataStoreVO, filePath); - updateRegisteredTemplateDetails(templateVO.getId(), templateDetails); + addExistingTemplateToStore(templateVO, templateDetails, templateDataStoreVO, zoneId, + storeUrlAndId.second(), storeMountPath); + updateRegisteredTemplateDetails(templateVO.getId(), templateDetails, zoneId); continue; } - registerTemplateForNonExistingEntries(hypervisorType, templateDetails.getArch(), templateDetails.getName(), - storeUrlAndId, filePath); + registerNewTemplate(templateDetails.getName(), templateDetails, zoneId, storeUrlAndId.second(), + storeMountPath); } } - public void registerTemplates(List> hypervisorsArchInUse) { + 
protected void registerTemplates(List> hypervisorsArchInUse) { GlobalLock lock = GlobalLock.getInternLock("UpgradeDatabase-Lock"); try { LOGGER.info("Grabbing lock to register Templates."); if (!lock.lock(LOCK_WAIT_TIMEOUT)) { - throw new CloudRuntimeException("Unable to acquire lock to register SystemVM Template."); + throw new CloudRuntimeException("Unable to acquire lock to register system VM Template."); } try { validateTemplates(hypervisorsArchInUse); @@ -970,13 +1020,13 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { unmountStore(filePath); } catch (Exception e) { unmountStore(filePath); - throw new CloudRuntimeException("Failed to register SystemVM Template. Upgrade failed"); + throw new CloudRuntimeException("Failed to register system VM Template. Upgrade Failed"); } } } }); } catch (Exception e) { - throw new CloudRuntimeException("Failed to register SystemVM Template. Upgrade failed"); + throw new CloudRuntimeException("Failed to register system VM Template. Upgrade Failed"); } } finally { lock.unlock(); @@ -984,7 +1034,18 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } } - private void updateRegisteredTemplateDetails(Long templateId, MetadataTemplateDetails templateDetails) { + /** + * Update the DB record for an existing template to mark it as a system template, + * set the guest OS (if resolvable), and propagate the change to system VM entries + * and related configuration for the template's hypervisor. + * + * @param templateId id of the template to update + * @param templateDetails metadata used to update the template record + * @param zoneId zone id whose per-zone details (if any) should be cleared; may be null + * @throws CloudRuntimeException if updating the template record fails + */ + protected void updateRegisteredTemplateDetails(Long templateId, MetadataTemplateDetails templateDetails, + Long zoneId) { VMTemplateVO templateVO = vmTemplateDao.findById(templateId); templateVO.setTemplateType(Storage.TemplateType.SYSTEM); GuestOSVO guestOS = guestOSDao.findOneByDisplayName(templateDetails.getGuestOs()); @@ -993,20 +1054,18 @@ private void updateRegisteredTemplateDetails(Long templateId, MetadataTemplateDe } boolean updated = vmTemplateDao.update(templateVO.getId(), templateVO); if (!updated) { - String errMsg = String.format("updateSystemVmTemplates:Exception while updating Template with id %s to be marked as 'system'", templateId); + String errMsg = String.format("Exception while updating template with id %s to be marked as 'system'", + templateId); LOGGER.error(errMsg); throw new CloudRuntimeException(errMsg); } Hypervisor.HypervisorType hypervisorType = templateDetails.getHypervisorType(); updateSystemVMEntries(templateId, hypervisorType); - // Change value of global configuration parameter router.template.* for the corresponding hypervisor and minreq.sysvmtemplate.version for the ACS version - Map configParams = new HashMap<>(); - configParams.put(RouterTemplateConfigurationNames.get(hypervisorType), templateDetails.getName()); - configParams.put("minreq.sysvmtemplate.version", getSystemVmTemplateVersion()); - updateConfigurationParams(configParams); + updateConfigurationParams(hypervisorType, templateDetails.getName(), zoneId); } - private void updateTemplateUrlChecksumAndGuestOsId(VMTemplateVO templateVO, MetadataTemplateDetails templateDetails) { + protected void updateTemplateUrlChecksumAndGuestOsId(VMTemplateVO templateVO, + MetadataTemplateDetails templateDetails) { 
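+ // Refresh the stored url and checksum from metadata and, when the guest OS can be resolved by display name, the guest OS id as well.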
templateVO.setUrl(templateDetails.getUrl()); templateVO.setChecksum(templateDetails.getChecksum()); GuestOSVO guestOS = guestOSDao.findOneByDisplayName(templateDetails.getGuestOs()); @@ -1015,51 +1074,192 @@ private void updateTemplateUrlChecksumAndGuestOsId(VMTemplateVO templateVO, Meta } boolean updated = vmTemplateDao.update(templateVO.getId(), templateVO); if (!updated) { - String errMsg = String.format("updateSystemVmTemplates:Exception while updating 'url' and 'checksum' for hypervisor type %s", templateDetails.getHypervisorType()); + String errMsg = String.format("Exception while updating 'url' and 'checksum' for hypervisor type %s", + templateDetails.getHypervisorType()); LOGGER.error(errMsg); throw new CloudRuntimeException(errMsg); } } - protected boolean registerOrUpdateSystemVmTemplate(MetadataTemplateDetails templateDetails, - List> hypervisorsInUse) { - LOGGER.debug("Updating System VM template for {}", templateDetails.getHypervisorArchLog()); - VMTemplateVO registeredTemplate = getRegisteredTemplate(templateDetails.getName(), templateDetails.getArch()); - // change template type to SYSTEM + /** + * Updates or registers the system VM Template for the given hypervisor/arch if not already present. + * Returns true if a new template was registered. + * If there is an existing system VM Template for the given hypervisor/arch, its details are updated. + * If no existing template is found, new templates are registered for the valid hypervisor/arch which are in use. + */ + protected boolean updateOrRegisterSystemVmTemplate(MetadataTemplateDetails templateDetails, + List> hypervisorArchInUse) { + String systemVmTemplateLog = String.format("%s system VM Template for %s", getSystemVmTemplateVersion(), + templateDetails.getHypervisorArchLog()); + LOGGER.debug("Registering or updating {}", systemVmTemplateLog, + templateDetails.getHypervisorArchLog()); + VMTemplateVO registeredTemplate = getRegisteredTemplate(templateDetails.getName(), + templateDetails.getHypervisorType(), templateDetails.getArch(), templateDetails.getUrl()); if (registeredTemplate != null) { - updateRegisteredTemplateDetails(registeredTemplate.getId(), templateDetails); - } else { - boolean isHypervisorArchMatchMetadata = hypervisorsInUse.stream() - .anyMatch(p -> p.first().equals(templateDetails.getHypervisorType()) - && Objects.equals(p.second(), templateDetails.getArch())); - if (isHypervisorArchMatchMetadata) { - try { - registerTemplates(hypervisorsInUse); - return true; - } catch (final Exception e) { - throw new CloudRuntimeException(String.format("Failed to register %s templates for hypervisors: [%s]. 
" + - "Cannot upgrade system VMs", - getSystemVmTemplateVersion(), - StringUtils.join(hypervisorsInUse.stream() - .map(x -> getHypervisorArchKey(x.first(), x.second())) - .collect(Collectors.toList()), ",")), e); - } - } else { - LOGGER.warn("Cannot upgrade {} system VM template for {} as it is not used, not failing upgrade", - getSystemVmTemplateVersion(), templateDetails.getHypervisorArchLog()); - VMTemplateVO templateVO = vmTemplateDao.findLatestTemplateByTypeAndHypervisorAndArch( - templateDetails.getHypervisorType(), templateDetails.getArch(), Storage.TemplateType.SYSTEM); - if (templateVO != null) { - updateTemplateUrlChecksumAndGuestOsId(templateVO, templateDetails); - } + LOGGER.info("{} is already registered, updating details for: {}", + systemVmTemplateLog, templateDetails.getHypervisorArchLog(), registeredTemplate); + updateRegisteredTemplateDetails(registeredTemplate.getId(), templateDetails, null); + return false; + } + boolean isHypervisorArchMatchMetadata = hypervisorArchInUse.stream() + .anyMatch(p -> p.first().equals(templateDetails.getHypervisorType()) + && Objects.equals(p.second(), templateDetails.getArch())); + if (!isHypervisorArchMatchMetadata) { + LOGGER.warn("Skipping upgrading {} as it is not used, not failing upgrade", + getSystemVmTemplateVersion(), templateDetails.getHypervisorArchLog()); + VMTemplateVO templateVO = vmTemplateDao.findLatestTemplateByTypeAndHypervisorAndArch( + templateDetails.getHypervisorType(), templateDetails.getArch(), Storage.TemplateType.SYSTEM); + if (templateVO != null) { + updateTemplateUrlChecksumAndGuestOsId(templateVO, templateDetails); + } + return false; + } + try { + registerTemplates(hypervisorArchInUse); + return true; + } catch (final Exception e) { + throw new CloudRuntimeException(String.format("Failed to register %s templates for hypervisors: [%s]. " + + "Cannot upgrade system VMs", + getSystemVmTemplateVersion(), + StringUtils.join(hypervisorArchInUse.stream() + .map(x -> String.format("%s-%s", x.first().name(), x.second().name())) + .collect(Collectors.toList()), ",")), e); + } + } + + /** + * Return NFS version for the store: store-specific config if present + * or global config if absent. Returns null if not set. + */ + protected String getNfsVersion(long storeId) { + final String configKey = "secstorage.nfs.version"; + final Map storeDetails = imageStoreDetailsDao.getDetails(storeId); + if (storeDetails != null && storeDetails.containsKey(configKey)) { + return storeDetails.get(configKey); + } + ConfigurationVO globalNfsVersion = configurationDao.findByName(configKey); + if (globalNfsVersion != null) { + return globalNfsVersion.getValue(); + } + return null; + } + + /** + * Validate metadata for the given template's hypervisor/arch and add the existing template + * to the specified secondary store. On success, database entries are created/updated. 
+ * + * @param templateVO template to add + * @param templateDataStoreVO existing template-store mapping; may be null + * @param zoneId zone id where the operation is performed + * @param storeId target image store id + * @param filePath temporary mount path for the store + * @throws CloudRuntimeException on failure; the method attempts rollback/cleanup + */ + public void validateAndAddTemplateToStore(VMTemplateVO templateVO, TemplateDataStoreVO templateDataStoreVO, + long zoneId, long storeId, String filePath) { + MetadataTemplateDetails templateDetails = getValidatedTemplateDetailsForHypervisorAndArch( + templateVO.getHypervisorType(), templateVO.getArch()); + addExistingTemplateToStore(templateVO, templateDetails, templateDataStoreVO, zoneId, storeId, filePath); + } + + /** + * Validate metadata for the given hypervisor/arch and register a new system VM Template + * on the specified store and zone. Creates DB entries and seeds the template on the store. + * + * @param hypervisor hypervisor type + * @param arch cpu architecture + * @param name template name to register + * @param zoneId zone id where the operation is performed + * @param storeId target image store id + * @param filePath temporary mount path for the store + * @throws CloudRuntimeException on failure; the method attempts rollback/cleanup + */ + public void validateAndRegisterNewTemplate(Hypervisor.HypervisorType hypervisor, CPU.CPUArch arch, String name, + long zoneId, long storeId, String filePath) { + MetadataTemplateDetails templateDetails = getValidatedTemplateDetailsForHypervisorAndArch(hypervisor, arch); + registerNewTemplate(name, templateDetails, zoneId, storeId, filePath); + } + + /** + * Check whether the template at the given `path` on NFS `url` is already seeded. + * If found, updates DB with sizes and returns true; otherwise returns false. + * + * @throws CloudRuntimeException on any error + */ + public boolean validateIfSeeded(TemplateDataStoreVO templDataStoreVO, String url, String path, String nfsVersion) { + String filePath = null; + try { + filePath = Files.createTempDirectory(TEMPORARY_SECONDARY_STORE).toString(); + if (filePath == null) { + throw new CloudRuntimeException("Failed to create temporary directory to mount secondary store"); + } + mountStore(url, filePath, nfsVersion); + int lastIdx = path.lastIndexOf(File.separator); + String partialDirPath = path.substring(0, lastIdx); + String templatePath = filePath + File.separator + partialDirPath; + File templateProps = new File(templatePath + "/template.properties"); + if (templateProps.exists()) { + Pair templateSizes = readTemplatePropertiesSizes(templatePath + "/template.properties"); + updateSeededTemplateDetails(templDataStoreVO.getTemplateId(), templDataStoreVO.getDataStoreId(), + templateSizes.first(), templateSizes.second()); + LOGGER.info("System VM template already seeded, skipping registration"); + return true; + } + LOGGER.info("System VM template not seeded"); + return false; + } catch (Exception e) { + LOGGER.error("Failed to verify if the template is seeded", e); + throw new CloudRuntimeException("Failed to verify if the template is seeded", e); + } finally { + unmountStore(filePath); + try { + Files.delete(Path.of(filePath)); + } catch (IOException e) { + LOGGER.error("Failed to delete temporary directory: {}", filePath); } } - return false; } + /** + * Finds a registered system VM Template matching the provided criteria. + * + *

The method first attempts to locate the latest template by {@code templateName}, + * {@code hypervisorType} and {@code arch}. If none is found and a non-blank {@code url} + * is provided, it falls back to searching for an active system template by the + * URL path segment (the substring after the last '/' in the URL).

+ * + * @param templateName the template name to search for + * @param hypervisorType the hypervisor type + * @param arch the CPU architecture + * @param url optional download URL used as a fallback; may be {@code null} or blank + * @return the matching {@code VMTemplateVO} if found; {@code null} otherwise + */ + public VMTemplateVO getRegisteredTemplate(String templateName, Hypervisor.HypervisorType hypervisorType, + CPU.CPUArch arch, String url) { + VMTemplateVO registeredTemplate = vmTemplateDao.findLatestTemplateByName(templateName, hypervisorType, arch); + if (registeredTemplate == null && StringUtils.isNotBlank(url)) { + String urlPath = url.substring(url.lastIndexOf("/") + 1); + LOGGER.debug("No template found by name, falling back to search existing SYSTEM template by " + + "urlPath: {}, hypervisor: {}, arch:{}", urlPath, hypervisorType, arch); + registeredTemplate = vmTemplateDao.findActiveSystemTemplateByHypervisorArchAndUrlPath(hypervisorType, arch, + urlPath); + } + LOGGER.debug("Found existing registered template for hypervisor: {}, arch: {}: {}", hypervisorType, + arch, registeredTemplate); + return registeredTemplate; + } + + /** + * Update or register system VM Templates based on metadata. + * Runs the registration logic inside a database transaction: obtains the + * set of hypervisors/architectures in use, iterates over metadata entries + * and attempts to register or update each template. + * + * @param conn retained for compatibility with callers (not used directly) + */ public void updateSystemVmTemplates(final Connection conn) { - LOGGER.debug("Updating System Vm template IDs"); - updateSystemVmTemplateGuestOsId(); + LOGGER.debug("Updating System VM templates"); + updateHypervisorGuestOsMap(); Transaction.execute(new TransactionCallbackNoReturn() { @Override public void doInTransactionWithoutResult(final TransactionStatus status) { @@ -1069,10 +1269,9 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { } catch (final Exception e) { throw new CloudRuntimeException("Exception while getting hypervisor types from clusters", e); } - Collection templateEntries = NewTemplateMap.values(); - for (MetadataTemplateDetails templateDetails : templateEntries) { + for (MetadataTemplateDetails templateDetails : METADATA_TEMPLATE_LIST) { try { - if (registerOrUpdateSystemVmTemplate(templateDetails, hypervisorsInUse)) { + if (updateOrRegisterSystemVmTemplate(templateDetails, hypervisorsInUse)) { break; } } catch (final Exception e) { @@ -1081,24 +1280,11 @@ public void doInTransactionWithoutResult(final TransactionStatus status) { throw new CloudRuntimeException(errMsg, e); } } - LOGGER.debug("Updating System Vm Template IDs Complete"); + LOGGER.debug("Updating System VM Templates Complete"); } }); } - public String getNfsVersion(long storeId) { - final String configKey = "secstorage.nfs.version"; - final Map storeDetails = imageStoreDetailsDao.getDetails(storeId); - if (storeDetails != null && storeDetails.containsKey(configKey)) { - return storeDetails.get(configKey); - } - ConfigurationVO globalNfsVersion = configurationDao.findByName(configKey); - if (globalNfsVersion != null) { - return globalNfsVersion.getValue(); - } - return null; - } - protected static class MetadataTemplateDetails { private final Hypervisor.HypervisorType hypervisorType; private final String name; @@ -1160,6 +1346,16 @@ public String getDefaultFilePath() { return TEMPLATES_PATH + filename; } + public boolean isFileChecksumDifferent(File file) { + String fileChecksum = 
DigestHelper.calculateChecksum(file); + if (!fileChecksum.equals(getChecksum())) { + LOGGER.error("Checksum {} for file {} does not match checksum {} from metadata", + fileChecksum, file, getChecksum()); + return true; + } + return false; + } + public String getHypervisorArchLog() { return SystemVmTemplateRegistration.getHypervisorArchLog(hypervisorType, arch); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2214to30.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2214to30.java index 524b6a34893b..d4cdbcb9707d 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2214to30.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade2214to30.java @@ -77,8 +77,6 @@ public void performDataMigration(Connection conn) { encryptData(conn); // drop keys dropKeysIfExist(conn); - //update template ID for system Vms - //updateSystemVms(conn); This is not required as system template update is handled during 4.2 upgrade // update domain network ref updateDomainNetworkRef(conn); // update networks that use redundant routers to the new network offering diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade302to40.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade302to40.java index aa427252585f..bd8ddaa7c498 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade302to40.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade302to40.java @@ -62,7 +62,6 @@ public InputStream[] getPrepareScripts() { @Override public void performDataMigration(Connection conn) { - //updateVmWareSystemVms(conn); This is not required as system template update is handled during 4.2 upgrade correctVRProviders(conn); correctMultiplePhysicaNetworkSetups(conn); addHostDetailsUniqueKey(conn); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade304to305.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade304to305.java index 3167dd8115b4..38dc90b460dd 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade304to305.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade304to305.java @@ -65,7 +65,6 @@ public void performDataMigration(Connection conn) { addVpcProvider(conn); updateRouterNetworkRef(conn); fixZoneUsingExternalDevices(conn); -// updateSystemVms(conn); fixForeignKeys(conn); encryptClusterDetails(conn); } @@ -81,54 +80,6 @@ public InputStream[] getCleanupScripts() { return new InputStream[] {script}; } - private void updateSystemVms(Connection conn) { - PreparedStatement pstmt = null; - ResultSet rs = null; - boolean VMware = false; - try { - pstmt = conn.prepareStatement("select distinct(hypervisor_type) from `cloud`.`cluster` where removed is null"); - rs = pstmt.executeQuery(); - while (rs.next()) { - if ("VMware".equals(rs.getString(1))) { - VMware = true; - } - } - } catch (SQLException e) { - throw new CloudRuntimeException("Error while iterating through list of hypervisors in use", e); - } - // Just update the VMware system template. Other hypervisor templates are unchanged from previous 3.0.x versions. 
- logger.debug("Updating VMware System Vms"); - try { - //Get 3.0.5 VMware system Vm template Id - pstmt = conn.prepareStatement("select id from `cloud`.`vm_template` where name = 'systemvm-vmware-3.0.5' and removed is null"); - rs = pstmt.executeQuery(); - if (rs.next()) { - long templateId = rs.getLong(1); - rs.close(); - pstmt.close(); - // change template type to SYSTEM - pstmt = conn.prepareStatement("update `cloud`.`vm_template` set type='SYSTEM' where id = ?"); - pstmt.setLong(1, templateId); - pstmt.executeUpdate(); - pstmt.close(); - // update template ID of system Vms - pstmt = conn.prepareStatement("update `cloud`.`vm_instance` set vm_template_id = ? where type <> 'User' and hypervisor_type = 'VMware'"); - pstmt.setLong(1, templateId); - pstmt.executeUpdate(); - pstmt.close(); - } else { - if (VMware) { - throw new CloudRuntimeException("3.0.5 VMware SystemVm Template not found. Cannot upgrade system Vms"); - } else { - logger.warn("3.0.5 VMware SystemVm Template not found. VMware hypervisor is not used, so not failing upgrade"); - } - } - } catch (SQLException e) { - throw new CloudRuntimeException("Error while updating VMware systemVM Template", e); - } - logger.debug("Updating System VM Template IDs Complete"); - } - private void addVpcProvider(Connection conn) { //Encrypt config params and change category to Hidden logger.debug("Adding VPC provider to all physical Networks in the system"); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade410to420.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade410to420.java index 94e6149e73b2..a66aa69798aa 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade410to420.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade410to420.java @@ -45,6 +45,10 @@ public class Upgrade410to420 extends DbUpgradeAbstractImpl { + public static final String UNABLE_TO_PERSIST_VSWITCH_CONFIGURATION_OF_VMWARE_CLUSTERS = "Unable to persist vswitch configuration of VMware clusters."; + public static final String INSERT_MODIFIED_ROWS = "Insert modified {} rows"; + public static final String UPDATE_MODIFIED_ROWS = "Update modified {} rows"; + @Override public String[] getUpgradableVersionRange() { return new String[] {"4.1.0", "4.2.0"}; @@ -55,11 +59,6 @@ public String getUpgradedVersion() { return "4.2.0"; } - @Override - public boolean supportsRollingUpgrade() { - return false; - } - @Override public InputStream[] getPrepareScripts() { final String scriptFile = "META-INF/db/schema-410to420.sql"; @@ -117,12 +116,12 @@ public void performDataMigration(Connection conn) { private void createFullCloneFlag(Connection conn) { String update_sql; int numRows = 0; - try (PreparedStatement delete = conn.prepareStatement("delete from `cloud`.`configuration` where name='vmware.create.full.clone';");) + try (PreparedStatement delete = conn.prepareStatement("delete from `cloud`.`configuration` where name='vmware.create.full.clone';")) { delete.executeUpdate(); - try(PreparedStatement query = conn.prepareStatement("select count(*) from `cloud`.`data_center`");) + try(PreparedStatement query = conn.prepareStatement("select count(*) from `cloud`.`data_center`")) { - try(ResultSet rs = query.executeQuery();) { + try(ResultSet rs = query.executeQuery()) { if (rs.next()) { numRows = rs.getInt(1); } @@ -131,7 +130,7 @@ private void createFullCloneFlag(Connection conn) { } else { update_sql = "insert into `cloud`.`configuration` (`category`, `instance`, `component`, `name`, `value`, `description`) VALUES ('Advanced', 
'DEFAULT', 'UserVmManager', 'vmware.create.full.clone' , 'true', 'If set to true, creates VMs as full clones on ESX hypervisor');"; } - try(PreparedStatement update_pstmt = conn.prepareStatement(update_sql);) { + try(PreparedStatement update_pstmt = conn.prepareStatement(update_sql)) { update_pstmt.executeUpdate(); }catch (SQLException e) { throw new CloudRuntimeException("Failed to set global flag vmware.create.full.clone: ", e); @@ -148,7 +147,7 @@ private void createFullCloneFlag(Connection conn) { } private void migrateVolumeOnSecondaryStorage(Connection conn) { - try (PreparedStatement sql = conn.prepareStatement("update `cloud`.`volumes` set state='Uploaded' where state='UploadOp'");){ + try (PreparedStatement sql = conn.prepareStatement("update `cloud`.`volumes` set state='Uploaded' where state='UploadOp'")){ sql.executeUpdate(); } catch (SQLException e) { throw new CloudRuntimeException("Failed to upgrade volume state: ", e); @@ -156,7 +155,7 @@ private void migrateVolumeOnSecondaryStorage(Connection conn) { } private void persistVswitchConfiguration(Connection conn) { - Long clusterId; + long clusterId; String clusterHypervisorType; final String NEXUS_GLOBAL_CONFIG_PARAM_NAME = "vmware.use.nexus.vswitch"; final String DVS_GLOBAL_CONFIG_PARAM_NAME = "vmware.use.dvswitch"; @@ -168,10 +167,10 @@ private void persistVswitchConfiguration(Connection conn) { boolean nexusEnabled = false; String publicVswitchType = VMWARE_STANDARD_VSWITCH; String guestVswitchType = VMWARE_STANDARD_VSWITCH; - Map>> detailsMap = new HashMap>>(); + Map>> detailsMap = new HashMap<>(); List> detailsList; - try (PreparedStatement clustersQuery = conn.prepareStatement("select id, hypervisor_type from `cloud`.`cluster` where removed is NULL");){ - try(ResultSet clusters = clustersQuery.executeQuery();) { + try (PreparedStatement clustersQuery = conn.prepareStatement("select id, hypervisor_type from `cloud`.`cluster` where removed is NULL")){ + try(ResultSet clusters = clustersQuery.executeQuery()) { while (clusters.next()) { clusterHypervisorType = clusters.getString("hypervisor_type"); clusterId = clusters.getLong("id"); @@ -186,20 +185,19 @@ private void persistVswitchConfiguration(Connection conn) { publicVswitchType = NEXUS_1000V_DVSWITCH; guestVswitchType = NEXUS_1000V_DVSWITCH; } - detailsList = new ArrayList>(); - detailsList.add(new Pair(ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC, guestVswitchType)); - detailsList.add(new Pair(ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC, publicVswitchType)); + detailsList = new ArrayList<>(); + detailsList.add(new Pair<>(ApiConstants.VSWITCH_TYPE_GUEST_TRAFFIC, guestVswitchType)); + detailsList.add(new Pair<>(ApiConstants.VSWITCH_TYPE_PUBLIC_TRAFFIC, publicVswitchType)); detailsMap.put(clusterId, detailsList); updateClusterDetails(conn, detailsMap); - logger.debug("Persist vSwitch Configuration: Successfully persisted vswitch configuration for cluster " + clusterId); + logger.debug("Persist vSwitch Configuration: Successfully persisted vswitch configuration for cluster {}", clusterId); } else { - logger.debug("Persist vSwitch Configuration: Ignoring cluster " + clusterId + " with hypervisor type " + clusterHypervisorType); - continue; + logger.debug("Persist vSwitch Configuration: Ignoring cluster {} with hypervisor type {}", clusterId, clusterHypervisorType); } } // End cluster iteration - }catch (SQLException e) { - String msg = "Unable to persist vswitch configuration of VMware clusters." 
+ e.getMessage(); + } catch (SQLException e) { + String msg = UNABLE_TO_PERSIST_VSWITCH_CONFIGURATION_OF_VMWARE_CLUSTERS + e.getMessage(); logger.error(msg); throw new CloudRuntimeException(msg, e); } @@ -209,10 +207,11 @@ private void persistVswitchConfiguration(Connection conn) { setConfigurationParameter(conn, VSWITCH_GLOBAL_CONFIG_PARAM_CATEGORY, DVS_GLOBAL_CONFIG_PARAM_NAME, "true"); } } catch (SQLException e) { - String msg = "Unable to persist vswitch configuration of VMware clusters." + e.getMessage(); + String msg = UNABLE_TO_PERSIST_VSWITCH_CONFIGURATION_OF_VMWARE_CLUSTERS + e.getMessage(); logger.error(msg); throw new CloudRuntimeException(msg, e); } + } private void updateClusterDetails(Connection conn, Map>> detailsMap) { @@ -227,7 +226,7 @@ private void updateClusterDetails(Connection conn, Map keyValuePair : keyValues) { key = keyValuePair.first(); val = keyValuePair.second(); @@ -236,7 +235,7 @@ private void updateClusterDetails(Connection conn, Map keys = new ArrayList(); + List keys = new ArrayList<>(); keys.add("fk_external_dhcp_devices_nsp_id"); keys.add("fk_external_dhcp_devices_host_id"); keys.add("fk_external_dhcp_devices_pod_id"); @@ -397,15 +395,15 @@ private void fixBaremetalForeignKeys(Connection conn) { keys.add("fk_external_pxe_devices_physical_network_id"); DbUpgradeUtils.dropKeysIfExist(conn, "baremetal_pxe_devices", keys, true); - try (PreparedStatement alter_pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_nsp_id` FOREIGN KEY (`nsp_id`) REFERENCES `physical_network_service_providers` (`id`) ON DELETE CASCADE");) + try (PreparedStatement alter_pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_nsp_id` FOREIGN KEY (`nsp_id`) REFERENCES `physical_network_service_providers` (`id`) ON DELETE CASCADE")) { alter_pstmt.executeUpdate(); try(PreparedStatement alter_pstmt_id = - conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE"); + conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE") ) { alter_pstmt_id.executeUpdate(); try(PreparedStatement alter_pstmt_phy_net = - conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE");) + conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_dhcp_devices` ADD CONSTRAINT `fk_external_dhcp_devices_physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE")) { alter_pstmt_phy_net.executeUpdate(); }catch (SQLException e) { @@ -419,14 +417,14 @@ private void fixBaremetalForeignKeys(Connection conn) { throw new CloudRuntimeException("Unable to add foreign keys to baremetal_dhcp_devices table", e); } try (PreparedStatement alter_pxe_pstmt = - conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_nsp_id` FOREIGN KEY (`nsp_id`) REFERENCES `physical_network_service_providers` (`id`) ON DELETE CASCADE");) + conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_nsp_id` FOREIGN KEY (`nsp_id`) REFERENCES 
`physical_network_service_providers` (`id`) ON DELETE CASCADE")) { alter_pxe_pstmt.executeUpdate(); try(PreparedStatement alter_pxe_id_pstmt = - conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE");) { + conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_host_id` FOREIGN KEY (`host_id`) REFERENCES `host`(`id`) ON DELETE CASCADE")) { alter_pxe_id_pstmt.executeUpdate(); try(PreparedStatement alter_pxe_phy_net_pstmt = - conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE");) { + conn.prepareStatement("ALTER TABLE `cloud`.`baremetal_pxe_devices` ADD CONSTRAINT `fk_external_pxe_devices_physical_network_id` FOREIGN KEY (`physical_network_id`) REFERENCES `physical_network`(`id`) ON DELETE CASCADE")) { alter_pxe_phy_net_pstmt.executeUpdate(); }catch (SQLException e) { throw new CloudRuntimeException("Unable to add foreign keys to baremetal_pxe_devices table", e); @@ -442,13 +440,13 @@ private void fixBaremetalForeignKeys(Connection conn) { private void addIndexForAlert(Connection conn) { //First drop if it exists. (Due to patches shipped to customers some will have the index and some won't.) - List indexList = new ArrayList(); + List indexList = new ArrayList<>(); logger.debug("Dropping index i_alert__last_sent if it exists"); indexList.add("last_sent"); // in 4.1, we created this index that is not in convention. indexList.add("i_alert__last_sent"); DbUpgradeUtils.dropKeysIfExist(conn, "alert", indexList, false); //Now add index. - try(PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`alert` ADD INDEX `i_alert__last_sent`(`last_sent`)");) + try(PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`alert` ADD INDEX `i_alert__last_sent`(`last_sent`)")) { pstmt.executeUpdate(); logger.debug("Added index i_alert__last_sent for table alert"); @@ -457,76 +455,19 @@ private void addIndexForAlert(Connection conn) { } } - private void dropUploadTable(Connection conn) { - try(PreparedStatement pstmt0 = conn.prepareStatement("SELECT url, created, type_id, host_id from upload where type=?");) { - // Read upload table - Templates - logger.debug("Populating template_store_ref table"); - pstmt0.setString(1, "TEMPLATE"); - try(ResultSet rs0 = pstmt0.executeQuery();) - { - try(PreparedStatement pstmt1 = conn.prepareStatement("UPDATE template_store_ref SET download_url=?, download_url_created=? where template_id=? 
and store_id=?");) { - //Update template_store_ref - while (rs0.next()) { - pstmt1.setString(1, rs0.getString("url")); - pstmt1.setDate(2, rs0.getDate("created")); - pstmt1.setLong(3, rs0.getLong("type_id")); - pstmt1.setLong(4, rs0.getLong("host_id")); - pstmt1.executeUpdate(); - } - // Read upload table - Volumes - logger.debug("Populating volume store ref table"); - try(PreparedStatement pstmt2 = conn.prepareStatement("SELECT url, created, type_id, host_id, install_path from upload where type=?");) { - pstmt2.setString(1, "VOLUME"); - try(ResultSet rs2 = pstmt2.executeQuery();) { - - try(PreparedStatement pstmt3 = - conn.prepareStatement("INSERT IGNORE INTO volume_store_ref (volume_id, store_id, zone_id, created, state, download_url, download_url_created, install_path) VALUES (?,?,?,?,?,?,?,?)");) { - //insert into template_store_ref - while (rs2.next()) { - pstmt3.setLong(1, rs2.getLong("type_id")); - pstmt3.setLong(2, rs2.getLong("host_id")); - pstmt3.setLong(3, 1l);// ??? - pstmt3.setDate(4, rs2.getDate("created")); - pstmt3.setString(5, "Ready"); - pstmt3.setString(6, rs2.getString("url")); - pstmt3.setDate(7, rs2.getDate("created")); - pstmt3.setString(8, rs2.getString("install_path")); - pstmt3.executeUpdate(); - } - }catch (SQLException e) { - throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e); - } - }catch (SQLException e) { - throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e); - } - }catch (SQLException e) { - throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e); - } - }catch (SQLException e) { - throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e); - } - }catch (SQLException e) { - throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e); - } - - } catch (SQLException e) { - throw new CloudRuntimeException("Unable add date into template/volume store ref from upload table.", e); - } - } - //KVM snapshot flag: only turn on if Customers is using snapshot; private void setKVMSnapshotFlag(Connection conn) { logger.debug("Verify and set the KVM snapshot flag if snapshot was used. "); - try(PreparedStatement pstmt = conn.prepareStatement("select count(*) from `cloud`.`snapshots` where hypervisor_type = 'KVM'");) + try(PreparedStatement pstmt = conn.prepareStatement("select count(*) from `cloud`.`snapshots` where hypervisor_type = 'KVM'")) { int numRows = 0; - try(ResultSet rs = pstmt.executeQuery();) { + try(ResultSet rs = pstmt.executeQuery()) { if (rs.next()) { numRows = rs.getInt(1); } if (numRows > 0) { //Add the configuration flag - try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = 'kvm.snapshot.enabled'");) { + try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` SET value = ? WHERE name = 'kvm.snapshot.enabled'")) { update_pstmt.setString(1, "true"); update_pstmt.executeUpdate(); }catch (SQLException e) { @@ -543,19 +484,19 @@ private void setKVMSnapshotFlag(Connection conn) { } private void updatePrimaryStore(Connection conn) { - try(PreparedStatement sql = conn.prepareStatement("update storage_pool set storage_provider_name = ? , scope = ? where pool_type = 'Filesystem' or pool_type = 'LVM'");) { + try(PreparedStatement sql = conn.prepareStatement("update storage_pool set storage_provider_name = ? , scope = ? 
where pool_type = 'Filesystem' or pool_type = 'LVM'")) { sql.setString(1, DataStoreProvider.DEFAULT_PRIMARY); sql.setString(2, "HOST"); sql.executeUpdate(); - try(PreparedStatement sql2 = conn.prepareStatement("update storage_pool set storage_provider_name = ? , scope = ? where pool_type != 'Filesystem' and pool_type != 'LVM'");) { + try(PreparedStatement sql2 = conn.prepareStatement("update storage_pool set storage_provider_name = ? , scope = ? where pool_type != 'Filesystem' and pool_type != 'LVM'")) { sql2.setString(1, DataStoreProvider.DEFAULT_PRIMARY); sql2.setString(2, "CLUSTER"); sql2.executeUpdate(); }catch (SQLException e) { - throw new CloudRuntimeException("Failed to upgrade vm template data store uuid: " + e.toString()); + throw new CloudRuntimeException("Failed to upgrade vm template data store uuid: " + e, e); } } catch (SQLException e) { - throw new CloudRuntimeException("Failed to upgrade vm template data store uuid: " + e.toString()); + throw new CloudRuntimeException("Failed to upgrade vm template data store uuid: " + e, e); } } @@ -565,20 +506,20 @@ private void updateOverCommitRatioClusterDetails(Connection conn) { PreparedStatement pstmt = conn.prepareStatement("select id, hypervisor_type from `cloud`.`cluster` WHERE removed IS NULL"); PreparedStatement pstmt1 = conn.prepareStatement("INSERT INTO `cloud`.`cluster_details` (cluster_id, name, value) VALUES(?, 'cpuOvercommitRatio', ?)"); PreparedStatement pstmt2 = conn.prepareStatement("INSERT INTO `cloud`.`cluster_details` (cluster_id, name, value) VALUES(?, 'memoryOvercommitRatio', ?)"); - PreparedStatement pstmt3 = conn.prepareStatement("select value from `cloud`.`configuration` where name=?");) { + PreparedStatement pstmt3 = conn.prepareStatement("select value from `cloud`.`configuration` where name=?")) { String global_cpu_overprovisioning_factor = "1"; String global_mem_overprovisioning_factor = "1"; pstmt3.setString(1, "cpu.overprovisioning.factor"); - try (ResultSet rscpu_global = pstmt3.executeQuery();) { + try (ResultSet rscpu_global = pstmt3.executeQuery()) { if (rscpu_global.next()) global_cpu_overprovisioning_factor = rscpu_global.getString(1); } pstmt3.setString(1, "mem.overprovisioning.factor"); - try (ResultSet rsmem_global = pstmt3.executeQuery();) { + try (ResultSet rsmem_global = pstmt3.executeQuery()) { if (rsmem_global.next()) global_mem_overprovisioning_factor = rsmem_global.getString(1); } - try (ResultSet rs1 = pstmt.executeQuery();) { + try (ResultSet rs1 = pstmt.executeQuery()) { while (rs1.next()) { long id = rs1.getLong(1); String hypervisor_type = rs1.getString(2); @@ -643,29 +584,34 @@ private void upgradeVmwareLabels(Connection conn) { String trafficTypeVswitchParamValue; try (PreparedStatement pstmt = - conn.prepareStatement("select name,value from `cloud`.`configuration` where category='Hidden' and value is not NULL and name REGEXP 'vmware*.vswitch';");) + conn.prepareStatement("select name,value from `cloud`.`configuration` where category='Hidden' and value is not NULL and name REGEXP 'vmware*.vswitch';")) { // update the existing vmware traffic labels - try(ResultSet rsParams = pstmt.executeQuery();) { + try(ResultSet rsParams = pstmt.executeQuery()) { while (rsParams.next()) { trafficTypeVswitchParam = rsParams.getString("name"); trafficTypeVswitchParamValue = rsParams.getString("value"); // When upgraded from 4.0 to 4.1 update physical network traffic label with trafficTypeVswitchParam - if (trafficTypeVswitchParam.equals("vmware.private.vswitch")) { - trafficType = "Management"; 
//TODO(sateesh): Ignore storage traffic, as required physical network already implemented, anything else tobe done? - } else if (trafficTypeVswitchParam.equals("vmware.public.vswitch")) { - trafficType = "Public"; - } else if (trafficTypeVswitchParam.equals("vmware.guest.vswitch")) { - trafficType = "Guest"; + switch (trafficTypeVswitchParam) { + case "vmware.private.vswitch": + trafficType = "Management"; //TODO(sateesh): Ignore storage traffic, as required physical network already implemented, anything else tobe done? + + break; + case "vmware.public.vswitch": + trafficType = "Public"; + break; + case "vmware.guest.vswitch": + trafficType = "Guest"; + break; } try(PreparedStatement sel_pstmt = - conn.prepareStatement("select physical_network_id, traffic_type, vmware_network_label from physical_network_traffic_types where vmware_network_label is not NULL and traffic_type=?;");) { + conn.prepareStatement("select physical_network_id, traffic_type, vmware_network_label from physical_network_traffic_types where vmware_network_label is not NULL and traffic_type=?;")) { pstmt.setString(1, trafficType); - try(ResultSet rsLabel = sel_pstmt.executeQuery();) { + try(ResultSet rsLabel = sel_pstmt.executeQuery()) { newLabel = getNewLabel(rsLabel, trafficTypeVswitchParamValue); try(PreparedStatement update_pstmt = - conn.prepareStatement("update physical_network_traffic_types set vmware_network_label = ? where traffic_type = ? and vmware_network_label is not NULL;");) { - logger.debug("Updating VMware label for " + trafficType + " traffic. Update SQL statement is " + pstmt); + conn.prepareStatement("update physical_network_traffic_types set vmware_network_label = ? where traffic_type = ? and vmware_network_label is not NULL;")) { + logger.debug("Updating VMware label for {} traffic. 
Update SQL statement is {}", trafficType, pstmt); pstmt.setString(1, newLabel); pstmt.setString(2, trafficType); update_pstmt.executeUpdate(); @@ -688,17 +634,17 @@ private void upgradeVmwareLabels(Connection conn) { } private void persistLegacyZones(Connection conn) { - List listOfLegacyZones = new ArrayList(); - List listOfNonLegacyZones = new ArrayList(); - Map> dcToZoneMap = new HashMap>(); + List listOfLegacyZones = new ArrayList<>(); + List listOfNonLegacyZones = new ArrayList<>(); + Map> dcToZoneMap = new HashMap<>(); ResultSet clusters = null; Long zoneId; - Long clusterId; + long clusterId; ArrayList dcList = null; String clusterHypervisorType; boolean legacyZone; boolean ignoreZone; - Long count; + long count; String dcOfPreviousCluster = null; String dcOfCurrentCluster = null; String[] tokens; @@ -706,15 +652,15 @@ private void persistLegacyZones(Connection conn) { String vc = ""; String dcName = ""; - try (PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`data_center` where removed is NULL");) { - try (ResultSet rs = pstmt.executeQuery();) { + try (PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`data_center` where removed is NULL")) { + try (ResultSet rs = pstmt.executeQuery()) { while (rs.next()) { zoneId = rs.getLong("id"); - try (PreparedStatement clustersQuery = conn.prepareStatement("select id, hypervisor_type from `cloud`.`cluster` where removed is NULL AND data_center_id=?");) { + try (PreparedStatement clustersQuery = conn.prepareStatement("select id, hypervisor_type from `cloud`.`cluster` where removed is NULL AND data_center_id=?")) { clustersQuery.setLong(1, zoneId); legacyZone = false; ignoreZone = true; - dcList = new ArrayList(); + dcList = new ArrayList<>(); count = 0L; // Legacy zone term is meant only for VMware // Legacy zone is a zone with at least 2 clusters & with multiple DCs or VCs @@ -730,9 +676,9 @@ private void persistLegacyZones(Connection conn) { if (clusterHypervisorType.equalsIgnoreCase("VMware")) { ignoreZone = false; try (PreparedStatement clusterDetailsQuery = conn - .prepareStatement("select value from `cloud`.`cluster_details` where name='url' and cluster_id=?");) { + .prepareStatement("select value from `cloud`.`cluster_details` where name='url' and cluster_id=?")) { clusterDetailsQuery.setLong(1, clusterId); - try (ResultSet clusterDetails = clusterDetailsQuery.executeQuery();) { + try (ResultSet clusterDetails = clusterDetailsQuery.executeQuery()) { clusterDetails.next(); url = clusterDetails.getString("value"); tokens = url.split("/"); // url format - http://vcenter/dc/cluster @@ -746,7 +692,7 @@ private void persistLegacyZones(Connection conn) { if (count > 0) { if (!dcOfPreviousCluster.equalsIgnoreCase(dcOfCurrentCluster)) { legacyZone = true; - logger.debug("Marking the zone " + zoneId + " as legacy zone."); + logger.debug("Marking the zone {} as legacy zone.", zoneId); } } } catch (SQLException e) { @@ -756,7 +702,7 @@ private void persistLegacyZones(Connection conn) { throw new CloudRuntimeException("Unable add zones to cloud.legacyzones table.", e); } } else { - logger.debug("Ignoring zone " + zoneId + " with hypervisor type " + clusterHypervisorType); + logger.debug("Ignoring zone {} with hypervisor type {}", zoneId, clusterHypervisorType); break; } count++; @@ -774,7 +720,7 @@ private void persistLegacyZones(Connection conn) { listOfNonLegacyZones.add(zoneId); } for (String dc : dcList) { - ArrayList dcZones = new ArrayList(); + ArrayList dcZones = new ArrayList<>(); if 
(dcToZoneMap.get(dc) != null) { dcZones = dcToZoneMap.get(dc); } @@ -796,22 +742,22 @@ private void persistLegacyZones(Connection conn) { updateLegacyZones(conn, listOfLegacyZones); updateNonLegacyZones(conn, listOfNonLegacyZones); } catch (SQLException e) { - logger.error("Unable to discover legacy zones." + e.getMessage(),e); + logger.error("Unable to discover legacy zones.{}", e.getMessage(), e); throw new CloudRuntimeException("Unable to discover legacy zones." + e.getMessage(), e); } }catch (SQLException e) { - logger.error("Unable to discover legacy zones." + e.getMessage(),e); + logger.error("Unable to discover legacy zones.{}", e.getMessage(), e); throw new CloudRuntimeException("Unable to discover legacy zones." + e.getMessage(), e); } } private void updateLegacyZones(Connection conn, List zones) { //Insert legacy zones into table for legacy zones. - try (PreparedStatement legacyZonesQuery = conn.prepareStatement("INSERT INTO `cloud`.`legacy_zones` (zone_id) VALUES (?)");){ + try (PreparedStatement legacyZonesQuery = conn.prepareStatement("INSERT INTO `cloud`.`legacy_zones` (zone_id) VALUES (?)")){ for (Long zoneId : zones) { legacyZonesQuery.setLong(1, zoneId); legacyZonesQuery.executeUpdate(); - logger.debug("Inserted zone " + zoneId + " into cloud.legacyzones table"); + logger.debug("Inserted zone {} into cloud.legacyzones table", zoneId); } } catch (SQLException e) { throw new CloudRuntimeException("Unable add zones to cloud.legacyzones table.", e); @@ -821,22 +767,22 @@ private void updateLegacyZones(Connection conn, List zones) { private void updateNonLegacyZones(Connection conn, List zones) { try { for (Long zoneId : zones) { - logger.debug("Discovered non-legacy zone " + zoneId + ". Processing the zone to associate with VMware datacenter."); + logger.debug("Discovered non-legacy zone {}. 
Processing the zone to associate with VMware datacenter.", zoneId); // All clusters in a non legacy zone will belong to the same VMware DC, hence pick the first cluster - try (PreparedStatement clustersQuery = conn.prepareStatement("select id from `cloud`.`cluster` where removed is NULL AND data_center_id=?");) { + try (PreparedStatement clustersQuery = conn.prepareStatement("select id from `cloud`.`cluster` where removed is NULL AND data_center_id=?")) { clustersQuery.setLong(1, zoneId); - try (ResultSet clusters = clustersQuery.executeQuery();) { + try (ResultSet clusters = clustersQuery.executeQuery()) { clusters.next(); - Long clusterId = clusters.getLong("id"); + long clusterId = clusters.getLong("id"); // Get VMware datacenter details from cluster_details table String user = null; String password = null; String url = null; - try (PreparedStatement clusterDetailsQuery = conn.prepareStatement("select name, value from `cloud`.`cluster_details` where cluster_id=?");) { + try (PreparedStatement clusterDetailsQuery = conn.prepareStatement("select name, value from `cloud`.`cluster_details` where cluster_id=?")) { clusterDetailsQuery.setLong(1, clusterId); - try (ResultSet clusterDetails = clusterDetailsQuery.executeQuery();) { + try (ResultSet clusterDetails = clusterDetailsQuery.executeQuery()) { while (clusterDetails.next()) { String key = clusterDetails.getString(1); String value = clusterDetails.getString(2); @@ -854,7 +800,7 @@ private void updateNonLegacyZones(Connection conn, List zones) { String guid = dcName + "@" + vc; try (PreparedStatement insertVmWareDC = conn - .prepareStatement("INSERT INTO `cloud`.`vmware_data_center` (uuid, name, guid, vcenter_host, username, password) values(?, ?, ?, ?, ?, ?)");) { + .prepareStatement("INSERT INTO `cloud`.`vmware_data_center` (uuid, name, guid, vcenter_host, username, password) values(?, ?, ?, ?, ?, ?)")) { insertVmWareDC.setString(1, UUID.randomUUID().toString()); insertVmWareDC.setString(2, dcName); insertVmWareDC.setString(3, guid); @@ -863,16 +809,16 @@ private void updateNonLegacyZones(Connection conn, List zones) { insertVmWareDC.setString(6, password); insertVmWareDC.executeUpdate(); } - try (PreparedStatement selectVmWareDC = conn.prepareStatement("SELECT id FROM `cloud`.`vmware_data_center` where guid=?");) { + try (PreparedStatement selectVmWareDC = conn.prepareStatement("SELECT id FROM `cloud`.`vmware_data_center` where guid=?")) { selectVmWareDC.setString(1, guid); - try (ResultSet vmWareDcInfo = selectVmWareDC.executeQuery();) { - Long vmwareDcId = -1L; + try (ResultSet vmWareDcInfo = selectVmWareDC.executeQuery()) { + long vmwareDcId = -1L; if (vmWareDcInfo.next()) { vmwareDcId = vmWareDcInfo.getLong("id"); } try (PreparedStatement insertMapping = conn - .prepareStatement("INSERT INTO `cloud`.`vmware_data_center_zone_map` (zone_id, vmware_data_center_id) values(?, ?)");) { + .prepareStatement("INSERT INTO `cloud`.`vmware_data_center_zone_map` (zone_id, vmware_data_center_id) values(?, ?)")) { insertMapping.setLong(1, zoneId); insertMapping.setLong(2, vmwareDcId); insertMapping.executeUpdate(); @@ -893,17 +839,17 @@ private void updateNonLegacyZones(Connection conn, List zones) { private void createPlaceHolderNics(Connection conn) { try (PreparedStatement pstmt = - conn.prepareStatement("SELECT network_id, gateway, ip4_address FROM `cloud`.`nics` WHERE reserver_name IN ('DirectNetworkGuru','DirectPodBasedNetworkGuru') and vm_type='DomainRouter' AND removed IS null");) + conn.prepareStatement("SELECT network_id, gateway, 
ip4_address FROM `cloud`.`nics` WHERE reserver_name IN ('DirectNetworkGuru','DirectPodBasedNetworkGuru') and vm_type='DomainRouter' AND removed IS null")) { - try(ResultSet rs = pstmt.executeQuery();) { + try(ResultSet rs = pstmt.executeQuery()) { while (rs.next()) { - Long networkId = rs.getLong(1); + long networkId = rs.getLong(1); String gateway = rs.getString(2); String ip = rs.getString(3); String uuid = UUID.randomUUID().toString(); //Insert placeholder nic for each Domain router nic in Shared network try(PreparedStatement insert_pstmt = - conn.prepareStatement("INSERT INTO `cloud`.`nics` (uuid, ip4_address, gateway, network_id, state, strategy, vm_type, default_nic, created) VALUES (?, ?, ?, ?, 'Reserved', 'PlaceHolder', 'DomainRouter', 0, now())");) { + conn.prepareStatement("INSERT INTO `cloud`.`nics` (uuid, ip4_address, gateway, network_id, state, strategy, vm_type, default_nic, created) VALUES (?, ?, ?, ?, 'Reserved', 'PlaceHolder', 'DomainRouter', 0, now())")) { insert_pstmt.setString(1, uuid); insert_pstmt.setString(2, ip); insert_pstmt.setString(3, gateway); @@ -912,7 +858,7 @@ private void createPlaceHolderNics(Connection conn) { }catch (SQLException e) { throw new CloudRuntimeException("Unable to create placeholder nics", e); } - logger.debug("Created placeholder nic for the ipAddress " + ip + " and network " + networkId); + logger.debug("Created placeholder nic for the ipAddress {} and network {}", ip, networkId); } }catch (SQLException e) { throw new CloudRuntimeException("Unable to create placeholder nics", e); @@ -923,13 +869,13 @@ private void createPlaceHolderNics(Connection conn) { } private void updateRemoteAccessVpn(Connection conn) { - try(PreparedStatement pstmt = conn.prepareStatement("SELECT vpn_server_addr_id FROM `cloud`.`remote_access_vpn`");) { - try(ResultSet rs = pstmt.executeQuery();) { + try(PreparedStatement pstmt = conn.prepareStatement("SELECT vpn_server_addr_id FROM `cloud`.`remote_access_vpn`")) { + try(ResultSet rs = pstmt.executeQuery()) { long id = 1; while (rs.next()) { String uuid = UUID.randomUUID().toString(); - Long ipId = rs.getLong(1); - try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`remote_access_vpn` set uuid=?, id=? where vpn_server_addr_id=?");) { + long ipId = rs.getLong(1); + try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`remote_access_vpn` set uuid=?, id=? where vpn_server_addr_id=?")) { update_pstmt.setString(1, uuid); update_pstmt.setLong(2, id); update_pstmt.setLong(3, ipId); @@ -949,44 +895,44 @@ private void updateRemoteAccessVpn(Connection conn) { private void addEgressFwRulesForSRXGuestNw(Connection conn) { ResultSet rs = null; - try(PreparedStatement pstmt = conn.prepareStatement("select network_id FROM `cloud`.`ntwk_service_map` where service='Firewall' and provider='JuniperSRX' ");) { + try(PreparedStatement pstmt = conn.prepareStatement("select network_id FROM `cloud`.`ntwk_service_map` where service='Firewall' and provider='JuniperSRX' ")) { rs = pstmt.executeQuery(); while (rs.next()) { long netId = rs.getLong(1); //checking for Isolated OR Virtual try(PreparedStatement sel_net_pstmt = - conn.prepareStatement("select account_id, domain_id FROM `cloud`.`networks` where (guest_type='Isolated' OR guest_type='Virtual') and traffic_type='Guest' and vpc_id is NULL and (state='implemented' OR state='Shutdown') and id=? 
");) { + conn.prepareStatement("select account_id, domain_id FROM `cloud`.`networks` where (guest_type='Isolated' OR guest_type='Virtual') and traffic_type='Guest' and vpc_id is NULL and (state='implemented' OR state='Shutdown') and id=? ")) { sel_net_pstmt.setLong(1, netId); logger.debug("Getting account_id, domain_id from networks table: "); - try(ResultSet rsNw = pstmt.executeQuery();) + try(ResultSet rsNw = pstmt.executeQuery()) { if (rsNw.next()) { long accountId = rsNw.getLong(1); long domainId = rsNw.getLong(2); //Add new rule for the existing networks - logger.debug("Adding default egress firewall rule for network " + netId); + logger.debug("Adding default egress firewall rule for network {}", netId); try (PreparedStatement insert_pstmt = - conn.prepareStatement("INSERT INTO firewall_rules (uuid, state, protocol, purpose, account_id, domain_id, network_id, xid, created, traffic_type) VALUES (?, 'Active', 'all', 'Firewall', ?, ?, ?, ?, now(), 'Egress')");) { + conn.prepareStatement("INSERT INTO firewall_rules (uuid, state, protocol, purpose, account_id, domain_id, network_id, xid, created, traffic_type) VALUES (?, 'Active', 'all', 'Firewall', ?, ?, ?, ?, now(), 'Egress')")) { insert_pstmt.setString(1, UUID.randomUUID().toString()); insert_pstmt.setLong(2, accountId); insert_pstmt.setLong(3, domainId); insert_pstmt.setLong(4, netId); insert_pstmt.setString(5, UUID.randomUUID().toString()); - logger.debug("Inserting default egress firewall rule " + insert_pstmt); + logger.debug("Inserting default egress firewall rule {}", insert_pstmt); insert_pstmt.executeUpdate(); } catch (SQLException e) { throw new CloudRuntimeException("Unable to set egress firewall rules ", e); } - try (PreparedStatement sel_firewall_pstmt = conn.prepareStatement("select id from firewall_rules where protocol='all' and network_id=?");) { + try (PreparedStatement sel_firewall_pstmt = conn.prepareStatement("select id from firewall_rules where protocol='all' and network_id=?")) { sel_firewall_pstmt.setLong(1, netId); - try (ResultSet rsId = sel_firewall_pstmt.executeQuery();) { + try (ResultSet rsId = sel_firewall_pstmt.executeQuery()) { long firewallRuleId; if (rsId.next()) { firewallRuleId = rsId.getLong(1); - try (PreparedStatement insert_pstmt = conn.prepareStatement("insert into firewall_rules_cidrs (firewall_rule_id,source_cidr) values (?, '0.0.0.0/0')");) { + try (PreparedStatement insert_pstmt = conn.prepareStatement("insert into firewall_rules_cidrs (firewall_rule_id,source_cidr) values (?, '0.0.0.0/0')")) { insert_pstmt.setLong(1, firewallRuleId); - logger.debug("Inserting rule for cidr 0.0.0.0/0 for the new Firewall rule id=" + firewallRuleId + " with statement " + insert_pstmt); + logger.debug("Inserting rule for cidr 0.0.0.0/0 for the new Firewall rule id={} with statement {}", firewallRuleId, insert_pstmt); insert_pstmt.executeUpdate(); } catch (SQLException e) { throw new CloudRuntimeException("Unable to set egress firewall rules ", e); @@ -1008,15 +954,15 @@ private void addEgressFwRulesForSRXGuestNw(Connection conn) { } private void upgradeEIPNetworkOfferings(Connection conn) { - try (PreparedStatement pstmt = conn.prepareStatement("select id, elastic_ip_service from `cloud`.`network_offerings` where traffic_type='Guest'");) + try (PreparedStatement pstmt = conn.prepareStatement("select id, elastic_ip_service from `cloud`.`network_offerings` where traffic_type='Guest'")) { - try(ResultSet rs = pstmt.executeQuery();) { + try(ResultSet rs = pstmt.executeQuery()) { while (rs.next()) { long id = 
rs.getLong(1); // check if elastic IP service is enabled for network offering if (rs.getLong(2) != 0) { //update network offering with eip_associate_public_ip set to true - try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`network_offerings` set eip_associate_public_ip=? where id=?");) { + try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`network_offerings` set eip_associate_public_ip=? where id=?")) { update_pstmt.setBoolean(1, true); update_pstmt.setLong(2, id); update_pstmt.executeUpdate(); @@ -1060,24 +1006,24 @@ private void updateNetworkACLs(Connection conn) { PreparedStatement pstmtSelectFirewallCidrs = conn.prepareStatement(sqlSelectFirewallCidrs); PreparedStatement pstmtDeleteFirewallCidr = conn.prepareStatement(sqlDeleteFirewallCidr); PreparedStatement pstmtDeleteFirewallRules = conn.prepareStatement(sqlDeleteFirewallRules); - ResultSet rsNetworkIds = pstmtSelectNetworkIds.executeQuery();) { + ResultSet rsNetworkIds = pstmtSelectNetworkIds.executeQuery()) { //Get all VPC tiers while (rsNetworkIds.next()) { - Long networkId = rsNetworkIds.getLong(1); - logger.debug("Updating network ACLs for network: " + networkId); - Long vpcId = rsNetworkIds.getLong(2); + long networkId = rsNetworkIds.getLong(1); + logger.debug("Updating network ACLs for network: {}", networkId); + long vpcId = rsNetworkIds.getLong(2); String tierUuid = rsNetworkIds.getString(3); pstmtSelectFirewallRules.setLong(1, networkId); boolean hasAcls = false; Long aclId = null; int number = 1; - try (ResultSet rsAcls = pstmtSelectFirewallRules.executeQuery();) { + try (ResultSet rsAcls = pstmtSelectFirewallRules.executeQuery()) { while (rsAcls.next()) { if (!hasAcls) { hasAcls = true; aclId = nextAclId++; //create ACL for the tier - logger.debug("Creating network ACL for tier: " + tierUuid); + logger.debug("Creating network ACL for tier: {}", tierUuid); pstmtInsertNetworkAcl.setLong(1, aclId); pstmtInsertNetworkAcl.setLong(2, vpcId); pstmtInsertNetworkAcl.setString(3, "ACL for tier " + tierUuid); @@ -1085,13 +1031,13 @@ private void updateNetworkACLs(Connection conn) { pstmtInsertNetworkAcl.executeUpdate(); } - Long fwRuleId = rsAcls.getLong(1); + long fwRuleId = rsAcls.getLong(1); String cidr = null; //get cidr from firewall_rules_cidrs pstmtSelectFirewallCidrs.setLong(1, fwRuleId); - try (ResultSet rsCidr = pstmtSelectFirewallCidrs.executeQuery();) { + try (ResultSet rsCidr = pstmtSelectFirewallCidrs.executeQuery()) { while (rsCidr.next()) { - Long cidrId = rsCidr.getLong(1); + long cidrId = rsCidr.getLong(1); String sourceCidr = rsCidr.getString(2); if (cidr == null) { cidr = sourceCidr; @@ -1105,20 +1051,20 @@ private void updateNetworkACLs(Connection conn) { } String aclItemUuid = rsAcls.getString(2); //Move acl to network_acl_item table - logger.debug("Moving firewall rule: " + aclItemUuid); + logger.debug("Moving firewall rule: {}", aclItemUuid); //uuid pstmtInsertNetworkAclItem.setString(1, aclItemUuid); //aclId pstmtInsertNetworkAclItem.setLong(2, aclId); //Start port - Integer startPort = rsAcls.getInt(3); + int startPort = rsAcls.getInt(3); if (rsAcls.wasNull()) { pstmtInsertNetworkAclItem.setNull(3, Types.INTEGER); } else { pstmtInsertNetworkAclItem.setLong(3, startPort); } //End port - Integer endPort = rsAcls.getInt(4); + int endPort = rsAcls.getInt(4); if (rsAcls.wasNull()) { pstmtInsertNetworkAclItem.setNull(4, Types.INTEGER); } else { @@ -1131,7 +1077,7 @@ private void updateNetworkACLs(Connection conn) { String protocol = rsAcls.getString(6); 
pstmtInsertNetworkAclItem.setString(6, protocol); //icmp_code - Integer icmpCode = rsAcls.getInt(7); + int icmpCode = rsAcls.getInt(7); if (rsAcls.wasNull()) { pstmtInsertNetworkAclItem.setNull(7, Types.INTEGER); } else { @@ -1139,7 +1085,7 @@ private void updateNetworkACLs(Connection conn) { } //icmp_type - Integer icmpType = rsAcls.getInt(8); + int icmpType = rsAcls.getInt(8); if (rsAcls.wasNull()) { pstmtInsertNetworkAclItem.setNull(8, Types.INTEGER); } else { @@ -1183,8 +1129,8 @@ private void updateNetworkACLs(Connection conn) { } private void updateGlobalDeploymentPlanner(Connection conn) { - try (PreparedStatement pstmt = conn.prepareStatement("select value from `cloud`.`configuration` where name = 'vm.allocation.algorithm'");){ - try(ResultSet rs = pstmt.executeQuery();) + try (PreparedStatement pstmt = conn.prepareStatement("select value from `cloud`.`configuration` where name = 'vm.allocation.algorithm'")){ + try(ResultSet rs = pstmt.executeQuery()) { while (rs.next()) { String globalValue = rs.getString(1); @@ -1195,16 +1141,12 @@ private void updateGlobalDeploymentPlanner(Connection conn) { plannerName = "FirstFitPlanner"; } else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.firstfit.toString())) { plannerName = "FirstFitPlanner"; - } else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userconcentratedpod_firstfit.toString())) { - plannerName = "UserConcentratedPodPlanner"; - } else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userconcentratedpod_random.toString())) { - plannerName = "UserConcentratedPodPlanner"; } else if (globalValue.equals(DeploymentPlanner.AllocationAlgorithm.userdispersing.toString())) { plannerName = "UserDispersingPlanner"; } } // update vm.deployment.planner global config - try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` set value=? where name = 'vm.deployment.planner'");) { + try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`configuration` set value=? 
where name = 'vm.deployment.planner'")) { update_pstmt.setString(1, plannerName); update_pstmt.executeUpdate(); } catch (SQLException e) { @@ -1221,13 +1163,13 @@ private void updateGlobalDeploymentPlanner(Connection conn) { private void upgradeDefaultVpcOffering(Connection conn) { try(PreparedStatement pstmt = - conn.prepareStatement("select distinct map.vpc_offering_id from `cloud`.`vpc_offering_service_map` map, `cloud`.`vpc_offerings` off where off.id=map.vpc_offering_id AND service='Lb'");) + conn.prepareStatement("select distinct map.vpc_offering_id from `cloud`.`vpc_offering_service_map` map, `cloud`.`vpc_offerings` off where off.id=map.vpc_offering_id AND service='Lb'")) { - try(ResultSet rs = pstmt.executeQuery();) { + try(ResultSet rs = pstmt.executeQuery()) { while (rs.next()) { long id = rs.getLong(1); //Add internal LB vm as a supported provider for the load balancer service - try(PreparedStatement insert_pstmt = conn.prepareStatement("INSERT INTO `cloud`.`vpc_offering_service_map` (vpc_offering_id, service, provider) VALUES (?,?,?)");) { + try(PreparedStatement insert_pstmt = conn.prepareStatement("INSERT INTO `cloud`.`vpc_offering_service_map` (vpc_offering_id, service, provider) VALUES (?,?,?)")) { insert_pstmt.setLong(1, id); insert_pstmt.setString(2, "Lb"); insert_pstmt.setString(3, "InternalLbVm"); @@ -1245,27 +1187,27 @@ private void upgradeDefaultVpcOffering(Connection conn) { } private void upgradePhysicalNtwksWithInternalLbProvider(Connection conn) { - try (PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM `cloud`.`physical_network` where removed is null");){ - try(ResultSet rs = pstmt.executeQuery();) { + try (PreparedStatement pstmt = conn.prepareStatement("SELECT id FROM `cloud`.`physical_network` where removed is null")){ + try(ResultSet rs = pstmt.executeQuery()) { while (rs.next()) { long pNtwkId = rs.getLong(1); String uuid = UUID.randomUUID().toString(); //Add internal LB VM to the list of physical network service providers try(PreparedStatement insert_pstmt = conn.prepareStatement("INSERT INTO `cloud`.`physical_network_service_providers` " + "(uuid, physical_network_id, provider_name, state, load_balance_service_provided, destination_physical_network_id)" - + " VALUES (?, ?, 'InternalLbVm', 'Enabled', 1, 0)");) { + + " VALUES (?, ?, 'InternalLbVm', 'Enabled', 1, 0)")) { insert_pstmt.setString(1, uuid); insert_pstmt.setLong(2, pNtwkId); insert_pstmt.executeUpdate(); //Add internal lb vm to the list of physical network elements try (PreparedStatement pstmt1 = - conn.prepareStatement("SELECT id FROM `cloud`.`physical_network_service_providers`" + " WHERE physical_network_id=? AND provider_name='InternalLbVm'");) { + conn.prepareStatement("SELECT id FROM `cloud`.`physical_network_service_providers`" + " WHERE physical_network_id=? 
AND provider_name='InternalLbVm'")) { pstmt1.setLong(1, pNtwkId); - try (ResultSet rs1 = pstmt1.executeQuery();) { + try (ResultSet rs1 = pstmt1.executeQuery()) { while (rs1.next()) { long providerId = rs1.getLong(1); uuid = UUID.randomUUID().toString(); - try(PreparedStatement insert_cloud_pstmt = conn.prepareStatement("INSERT INTO `cloud`.`virtual_router_providers` (nsp_id, uuid, type, enabled) VALUES (?, ?, 'InternalLbVm', 1)");) { + try(PreparedStatement insert_cloud_pstmt = conn.prepareStatement("INSERT INTO `cloud`.`virtual_router_providers` (nsp_id, uuid, type, enabled) VALUES (?, ?, 'InternalLbVm', 1)")) { insert_cloud_pstmt.setLong(1, providerId); insert_cloud_pstmt.setString(2, uuid); insert_cloud_pstmt.executeUpdate(); @@ -1291,14 +1233,14 @@ private void upgradePhysicalNtwksWithInternalLbProvider(Connection conn) { private void addHostDetailsIndex(Connection conn) { logger.debug("Checking if host_details index exists, if not we will add it"); - try(PreparedStatement pstmt = conn.prepareStatement("SHOW INDEX FROM `cloud`.`host_details` where KEY_NAME = 'fk_host_details__host_id'");) + try(PreparedStatement pstmt = conn.prepareStatement("SHOW INDEX FROM `cloud`.`host_details` where KEY_NAME = 'fk_host_details__host_id'")) { - try(ResultSet rs = pstmt.executeQuery();) { + try(ResultSet rs = pstmt.executeQuery()) { if (rs.next()) { logger.debug("Index already exists on host_details - not adding new one"); } else { // add the index - try(PreparedStatement pstmtUpdate = conn.prepareStatement("ALTER TABLE `cloud`.`host_details` ADD INDEX `fk_host_details__host_id` (`host_id`)");) { + try(PreparedStatement pstmtUpdate = conn.prepareStatement("ALTER TABLE `cloud`.`host_details` ADD INDEX `fk_host_details__host_id` (`host_id`)")) { pstmtUpdate.executeUpdate(); logger.debug("Index did not exist on host_details - added new one"); }catch (SQLException e) { @@ -1314,15 +1256,15 @@ private void addHostDetailsIndex(Connection conn) { } private void updateNetworksForPrivateGateways(Connection conn) { - try(PreparedStatement pstmt = conn.prepareStatement("SELECT network_id, vpc_id FROM `cloud`.`vpc_gateways` WHERE type='Private' AND removed IS null");) + try(PreparedStatement pstmt = conn.prepareStatement("SELECT network_id, vpc_id FROM `cloud`.`vpc_gateways` WHERE type='Private' AND removed IS null")) { //1) get all non removed gateways - try(ResultSet rs = pstmt.executeQuery();) { + try(ResultSet rs = pstmt.executeQuery()) { while (rs.next()) { - Long networkId = rs.getLong(1); - Long vpcId = rs.getLong(2); + long networkId = rs.getLong(1); + long vpcId = rs.getLong(2); //2) Update networks with vpc_id if its set to NULL - try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`networks` set vpc_id=? where id=? and vpc_id is NULL and removed is NULL");) { + try (PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`networks` set vpc_id=? where id=? 
and vpc_id is NULL and removed is NULL")) { update_pstmt.setLong(1, vpcId); update_pstmt.setLong(2, networkId); update_pstmt.executeUpdate(); @@ -1339,13 +1281,13 @@ private void updateNetworksForPrivateGateways(Connection conn) { } private void removeFirewallServiceFromSharedNetworkOfferingWithSGService(Connection conn) { - try(PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`network_offerings` where unique_name='DefaultSharedNetworkOfferingWithSGService'");) + try(PreparedStatement pstmt = conn.prepareStatement("select id from `cloud`.`network_offerings` where unique_name='DefaultSharedNetworkOfferingWithSGService'")) { - try(ResultSet rs = pstmt.executeQuery();) { + try(ResultSet rs = pstmt.executeQuery()) { while (rs.next()) { long id = rs.getLong(1); // remove Firewall service for SG shared network offering - try(PreparedStatement del_pstmt = conn.prepareStatement("DELETE from `cloud`.`ntwk_offering_service_map` where network_offering_id=? and service='Firewall'");) { + try(PreparedStatement del_pstmt = conn.prepareStatement("DELETE from `cloud`.`ntwk_offering_service_map` where network_offering_id=? and service='Firewall'")) { del_pstmt.setLong(1, id); del_pstmt.executeUpdate(); }catch (SQLException e) { @@ -1362,9 +1304,9 @@ private void removeFirewallServiceFromSharedNetworkOfferingWithSGService(Connect private void fix22xKVMSnapshots(Connection conn) { logger.debug("Updating KVM snapshots"); - try (PreparedStatement pstmt = conn.prepareStatement("select id, backup_snap_id from `cloud`.`snapshots` where hypervisor_type='KVM' and removed is null and backup_snap_id is not null");) + try (PreparedStatement pstmt = conn.prepareStatement("select id, backup_snap_id from `cloud`.`snapshots` where hypervisor_type='KVM' and removed is null and backup_snap_id is not null")) { - try(ResultSet rs = pstmt.executeQuery();) { + try(ResultSet rs = pstmt.executeQuery()) { while (rs.next()) { long id = rs.getLong(1); String backUpPath = rs.getString(2); @@ -1374,8 +1316,8 @@ private void fix22xKVMSnapshots(Connection conn) { int index = backUpPath.indexOf("snapshots" + File.separator); if (index > 1) { String correctedPath = backUpPath.substring(index); - logger.debug("Updating Snapshot with id: " + id + " original backup path: " + backUpPath + " updated backup path: " + correctedPath); - try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`snapshots` set backup_snap_id=? where id = ?");) { + logger.debug("Updating Snapshot with id: {} original backup path: {} updated backup path: {}", id, backUpPath, correctedPath); + try(PreparedStatement update_pstmt = conn.prepareStatement("UPDATE `cloud`.`snapshots` set backup_snap_id=? 
where id = ?")) { update_pstmt.setString(1, correctedPath); update_pstmt.setLong(2, id); update_pstmt.executeUpdate(); @@ -1401,8 +1343,8 @@ private void correctExternalNetworkDevicesSetup(Connection conn) { try ( PreparedStatement zoneSearchStmt = conn.prepareStatement("SELECT id, networktype FROM `cloud`.`data_center`"); - ResultSet zoneResults = zoneSearchStmt.executeQuery(); - ){ + ResultSet zoneResults = zoneSearchStmt.executeQuery() + ){ while (zoneResults.next()) { long zoneId = zoneResults.getLong(1); String networkType = zoneResults.getString(2); @@ -1433,7 +1375,6 @@ private void correctExternalNetworkDevicesSetup(Connection conn) { // balancers added in the zone while (f5DevicesResult.next()) { long f5HostId = f5DevicesResult.getLong(1); - ; addF5ServiceProvider(conn, physicalNetworkId, zoneId); addF5LoadBalancer(conn, f5HostId, physicalNetworkId); } @@ -1443,7 +1384,7 @@ private void correctExternalNetworkDevicesSetup(Connection conn) { try (PreparedStatement fetchSRXNspStmt = conn.prepareStatement("SELECT id from `cloud`.`physical_network_service_providers` where physical_network_id=" + physicalNetworkId + " and provider_name = 'JuniperSRX'"); - ResultSet rsSRXNSP = fetchSRXNspStmt.executeQuery();) { + ResultSet rsSRXNSP = fetchSRXNspStmt.executeQuery()) { hasSrxNsp = rsSRXNSP.next(); } @@ -1477,8 +1418,8 @@ private void addF5LoadBalancer(Connection conn, long hostId, long physicalNetwor String insertF5 = "INSERT INTO `cloud`.`external_load_balancer_devices` (physical_network_id, host_id, provider_name, " + "device_name, capacity, is_dedicated, device_state, allocation_state, is_managed, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; - try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertF5);) { - logger.debug("Adding F5 Big IP load balancer with host id " + hostId + " in to physical network" + physicalNetworkId); + try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertF5)) { + logger.debug("Adding F5 Big IP load balancer with host id {} in to physical network{}", hostId, physicalNetworkId); pstmtUpdate.setLong(1, physicalNetworkId); pstmtUpdate.setLong(2, hostId); pstmtUpdate.setString(3, "F5BigIp"); @@ -1499,8 +1440,8 @@ private void addSrxFirewall(Connection conn, long hostId, long physicalNetworkId String insertSrx = "INSERT INTO `cloud`.`external_firewall_devices` (physical_network_id, host_id, provider_name, " + "device_name, capacity, is_dedicated, device_state, allocation_state, uuid) VALUES ( ?, ?, ?, ?, ?, ?, ?, ?, ?)"; - try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertSrx);) { - logger.debug("Adding SRX firewall device with host id " + hostId + " in to physical network" + physicalNetworkId); + try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertSrx)) { + logger.debug("Adding SRX firewall device with host id {} in to physical network{}", hostId, physicalNetworkId); pstmtUpdate.setLong(1, physicalNetworkId); pstmtUpdate.setLong(2, hostId); pstmtUpdate.setString(3, "JuniperSRX"); @@ -1522,9 +1463,9 @@ private void addF5ServiceProvider(Connection conn, long physicalNetworkId, long + "`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`," + "`firewall_service_provided`, `source_nat_service_provided`, `load_balance_service_provided`, `static_nat_service_provided`," + "`port_forwarding_service_provided`, `user_data_service_provided`, `security_group_service_provided`) VALUES (?,?,?,?,0,0,0,0,0,0,0,1,0,0,0,0)"; - try(PreparedStatement 
pstmtUpdate = conn.prepareStatement(insertPNSP);) { + try(PreparedStatement pstmtUpdate = conn.prepareStatement(insertPNSP)) { // add physical network service provider - F5BigIp - logger.debug("Adding PhysicalNetworkServiceProvider F5BigIp" + " in to physical network" + physicalNetworkId); + logger.debug("Adding PhysicalNetworkServiceProvider F5BigIp in to physical network{}", physicalNetworkId); pstmtUpdate.setString(1, UUID.randomUUID().toString()); pstmtUpdate.setLong(2, physicalNetworkId); pstmtUpdate.setString(3, "F5BigIp"); @@ -1541,7 +1482,7 @@ private void addSrxServiceProvider(Connection conn, long physicalNetworkId, long + "`destination_physical_network_id`, `vpn_service_provided`, `dhcp_service_provided`, `dns_service_provided`, `gateway_service_provided`," + "`firewall_service_provided`, `source_nat_service_provided`, `load_balance_service_provided`, `static_nat_service_provided`," + "`port_forwarding_service_provided`, `user_data_service_provided`, `security_group_service_provided`) VALUES (?,?,?,?,0,0,0,0,1,1,1,0,1,1,0,0)"; - try( PreparedStatement pstmtUpdate = conn.prepareStatement(insertPNSP);) { + try( PreparedStatement pstmtUpdate = conn.prepareStatement(insertPNSP)) { // add physical network service provider - JuniperSRX logger.debug("Adding PhysicalNetworkServiceProvider JuniperSRX"); pstmtUpdate.setString(1, UUID.randomUUID().toString()); @@ -1563,16 +1504,15 @@ private void addSrxServiceProvider(Connection conn, long physicalNetworkId, long // they are made in lowercase. On upgrade change the host details name to lower case private void fixZoneUsingExternalDevices(Connection conn) { //Get zones to upgrade - List zoneIds = new ArrayList(); - ResultSet rs = null; + List zoneIds = new ArrayList<>(); long networkOfferingId, networkId; long f5DeviceId, f5HostId; long srxDevivceId, srxHostId; try(PreparedStatement sel_id_pstmt = - conn.prepareStatement("select id from `cloud`.`data_center` where lb_provider='F5BigIp' or firewall_provider='JuniperSRX' or gateway_provider='JuniperSRX'");) + conn.prepareStatement("select id from `cloud`.`data_center` where lb_provider='F5BigIp' or firewall_provider='JuniperSRX' or gateway_provider='JuniperSRX'")) { - try(ResultSet sel_id_rs = sel_id_pstmt.executeQuery();) { + try(ResultSet sel_id_rs = sel_id_pstmt.executeQuery()) { while (sel_id_rs.next()) { zoneIds.add(sel_id_rs.getLong(1)); } @@ -1583,14 +1523,14 @@ private void fixZoneUsingExternalDevices(Connection conn) { throw new CloudRuntimeException("fixZoneUsingExternalDevices:Exception:"+e.getMessage(), e); } - if (zoneIds.size() == 0) { + if (zoneIds.isEmpty()) { return; // no zones using F5 and SRX devices so return } // find the default network offering created for external devices during upgrade from 2.2.14 - try(PreparedStatement sel_id_off_pstmt = conn.prepareStatement("select id from `cloud`.`network_offerings` where unique_name='Isolated with external providers' ");) + try(PreparedStatement sel_id_off_pstmt = conn.prepareStatement("select id from `cloud`.`network_offerings` where unique_name='Isolated with external providers' ")) { - try(ResultSet sel_id_off_rs = sel_id_off_pstmt.executeQuery();) { + try(ResultSet sel_id_off_rs = sel_id_off_pstmt.executeQuery()) { if (sel_id_off_rs.first()) { networkOfferingId = sel_id_off_rs.getLong(1); } else { @@ -1605,9 +1545,9 @@ private void fixZoneUsingExternalDevices(Connection conn) { for (Long zoneId : zoneIds) { try { // find the F5 device id in the zone - try(PreparedStatement sel_id_host_pstmt = conn.prepareStatement("SELECT id 
FROM host WHERE data_center_id=? AND type = 'ExternalLoadBalancer' AND removed IS NULL");) { + try(PreparedStatement sel_id_host_pstmt = conn.prepareStatement("SELECT id FROM host WHERE data_center_id=? AND type = 'ExternalLoadBalancer' AND removed IS NULL")) { sel_id_host_pstmt.setLong(1, zoneId); - try(ResultSet sel_id_host_pstmt_rs = sel_id_host_pstmt.executeQuery();) { + try(ResultSet sel_id_host_pstmt_rs = sel_id_host_pstmt.executeQuery()) { if (sel_id_host_pstmt_rs.first()) { f5HostId = sel_id_host_pstmt_rs.getLong(1); } else { @@ -1619,9 +1559,9 @@ private void fixZoneUsingExternalDevices(Connection conn) { }catch (SQLException e) { throw new CloudRuntimeException("fixZoneUsingExternalDevices:Exception:"+e.getMessage(), e); } - try(PreparedStatement sel_id_ext_pstmt = conn.prepareStatement("SELECT id FROM external_load_balancer_devices WHERE host_id=?");) { + try(PreparedStatement sel_id_ext_pstmt = conn.prepareStatement("SELECT id FROM external_load_balancer_devices WHERE host_id=?")) { sel_id_ext_pstmt.setLong(1, f5HostId); - try(ResultSet sel_id_ext_rs = sel_id_ext_pstmt.executeQuery();) { + try(ResultSet sel_id_ext_rs = sel_id_ext_pstmt.executeQuery()) { if (sel_id_ext_rs.first()) { f5DeviceId = sel_id_ext_rs.getLong(1); } else { @@ -1636,9 +1576,9 @@ private void fixZoneUsingExternalDevices(Connection conn) { } // find the SRX device id in the zone - try(PreparedStatement sel_id_hostdc_pstmt = conn.prepareStatement("SELECT id FROM host WHERE data_center_id=? AND type = 'ExternalFirewall' AND removed IS NULL");) { + try(PreparedStatement sel_id_hostdc_pstmt = conn.prepareStatement("SELECT id FROM host WHERE data_center_id=? AND type = 'ExternalFirewall' AND removed IS NULL")) { sel_id_hostdc_pstmt.setLong(1, zoneId); - try(ResultSet sel_id_hostdc_pstmt_rs = sel_id_hostdc_pstmt.executeQuery();) { + try(ResultSet sel_id_hostdc_pstmt_rs = sel_id_hostdc_pstmt.executeQuery()) { if (sel_id_hostdc_pstmt_rs.first()) { srxHostId = sel_id_hostdc_pstmt_rs.getLong(1); } else { @@ -1651,9 +1591,9 @@ private void fixZoneUsingExternalDevices(Connection conn) { throw new CloudRuntimeException("fixZoneUsingExternalDevices:Exception:"+e.getMessage(), e); } - try(PreparedStatement sel_id_ext_frwl_pstmt = conn.prepareStatement("SELECT id FROM external_firewall_devices WHERE host_id=?");) { + try(PreparedStatement sel_id_ext_frwl_pstmt = conn.prepareStatement("SELECT id FROM external_firewall_devices WHERE host_id=?")) { sel_id_ext_frwl_pstmt.setLong(1, srxHostId); - try(ResultSet sel_id_ext_frwl_pstmt_rs = sel_id_ext_frwl_pstmt.executeQuery();) { + try(ResultSet sel_id_ext_frwl_pstmt_rs = sel_id_ext_frwl_pstmt.executeQuery()) { if (sel_id_ext_frwl_pstmt_rs.first()) { srxDevivceId = sel_id_ext_frwl_pstmt_rs.getLong(1); } else { @@ -1669,10 +1609,10 @@ private void fixZoneUsingExternalDevices(Connection conn) { // check if network any uses F5 or SRX devices in the zone try(PreparedStatement sel_id_cloud_pstmt = - conn.prepareStatement("select id from `cloud`.`networks` where guest_type='Virtual' and data_center_id=? and network_offering_id=? and removed IS NULL");) { + conn.prepareStatement("select id from `cloud`.`networks` where guest_type='Virtual' and data_center_id=? and network_offering_id=? 
and removed IS NULL")) { sel_id_cloud_pstmt.setLong(1, zoneId); sel_id_cloud_pstmt.setLong(2, networkOfferingId); - try(ResultSet sel_id_cloud_pstmt_rs = sel_id_cloud_pstmt.executeQuery();) { + try(ResultSet sel_id_cloud_pstmt_rs = sel_id_cloud_pstmt.executeQuery()) { while (sel_id_cloud_pstmt_rs.next()) { // get the network Id networkId = sel_id_cloud_pstmt_rs.getLong(1); @@ -1680,7 +1620,7 @@ private void fixZoneUsingExternalDevices(Connection conn) { // add mapping for the network in network_external_lb_device_map String insertLbMapping = "INSERT INTO `cloud`.`network_external_lb_device_map` (uuid, network_id, external_load_balancer_device_id, created) VALUES ( ?, ?, ?, now())"; - try (PreparedStatement insert_lb_stmt = conn.prepareStatement(insertLbMapping);) { + try (PreparedStatement insert_lb_stmt = conn.prepareStatement(insertLbMapping)) { insert_lb_stmt.setString(1, UUID.randomUUID().toString()); insert_lb_stmt.setLong(2, networkId); insert_lb_stmt.setLong(3, f5DeviceId); @@ -1688,12 +1628,12 @@ private void fixZoneUsingExternalDevices(Connection conn) { } catch (SQLException e) { throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e); } - logger.debug("Successfully added entry in network_external_lb_device_map for network " + networkId + " and F5 device ID " + f5DeviceId); + logger.debug("Successfully added entry in network_external_lb_device_map for network {} and F5 device ID {}", networkId, f5DeviceId); // add mapping for the network in network_external_firewall_device_map String insertFwMapping = "INSERT INTO `cloud`.`network_external_firewall_device_map` (uuid, network_id, external_firewall_device_id, created) VALUES ( ?, ?, ?, now())"; - try (PreparedStatement insert_ext_firewall_stmt = conn.prepareStatement(insertFwMapping);) { + try (PreparedStatement insert_ext_firewall_stmt = conn.prepareStatement(insertFwMapping)) { insert_ext_firewall_stmt.setString(1, UUID.randomUUID().toString()); insert_ext_firewall_stmt.setLong(2, networkId); insert_ext_firewall_stmt.setLong(3, srxDevivceId); @@ -1701,7 +1641,7 @@ private void fixZoneUsingExternalDevices(Connection conn) { } catch (SQLException e) { throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e); } - logger.debug("Successfully added entry in network_external_firewall_device_map for network " + networkId + " and SRX device ID " + srxDevivceId); + logger.debug("Successfully added entry in network_external_firewall_device_map for network {} and SRX device ID {}", networkId, srxDevivceId); } }catch (SQLException e) { throw new CloudRuntimeException("Unable create a mapping for the networks in network_external_lb_device_map and network_external_firewall_device_map", e); @@ -1711,10 +1651,10 @@ private void fixZoneUsingExternalDevices(Connection conn) { } // update host details for F5 and SRX devices logger.debug("Updating the host details for F5 and SRX devices"); - try(PreparedStatement sel_pstmt = conn.prepareStatement("SELECT host_id, name FROM `cloud`.`host_details` WHERE host_id=? OR host_id=?");) { + try(PreparedStatement sel_pstmt = conn.prepareStatement("SELECT host_id, name FROM `cloud`.`host_details` WHERE host_id=? 
OR host_id=?")) { sel_pstmt.setLong(1, f5HostId); sel_pstmt.setLong(2, srxHostId); - try(ResultSet sel_rs = sel_pstmt.executeQuery();) { + try(ResultSet sel_rs = sel_pstmt.executeQuery()) { while (sel_rs.next()) { long hostId = sel_rs.getLong(1); String camlCaseName = sel_rs.getString(2); @@ -1723,7 +1663,7 @@ private void fixZoneUsingExternalDevices(Connection conn) { continue; } String lowerCaseName = camlCaseName.toLowerCase(); - try (PreparedStatement update_pstmt = conn.prepareStatement("update `cloud`.`host_details` set name=? where host_id=? AND name=?");) { + try (PreparedStatement update_pstmt = conn.prepareStatement("update `cloud`.`host_details` set name=? where host_id=? AND name=?")) { update_pstmt.setString(1, lowerCaseName); update_pstmt.setLong(2, hostId); update_pstmt.setString(3, camlCaseName); @@ -1750,7 +1690,6 @@ private void fixZoneUsingExternalDevices(Connection conn) { private void migrateSecondaryStorageToImageStore(Connection conn) { String sqlSelectS3Count = "select count(*) from `cloud`.`s3`"; String sqlSelectSwiftCount = "select count(*) from `cloud`.`swift`"; - String sqlInsertStoreDetail = "INSERT INTO `cloud`.`image_store_details` (store_id, name, value) values(?, ?, ?)"; String sqlUpdateHostAsRemoved = "UPDATE `cloud`.`host` SET removed = now() WHERE type = 'SecondaryStorage' and removed is null"; logger.debug("Migrating secondary storage to image store"); @@ -1758,7 +1697,6 @@ private void migrateSecondaryStorageToImageStore(Connection conn) { try ( PreparedStatement pstmtSelectS3Count = conn.prepareStatement(sqlSelectS3Count); PreparedStatement pstmtSelectSwiftCount = conn.prepareStatement(sqlSelectSwiftCount); - PreparedStatement storeDetailInsert = conn.prepareStatement(sqlInsertStoreDetail); PreparedStatement storeInsert = conn.prepareStatement("INSERT INTO `cloud`.`image_store` (id, uuid, name, image_provider_name, protocol, url, data_center_id, scope, role, parent, total_size, created, removed) values(?, ?, ?, 'NFS', 'nfs', ?, ?, 'ZONE', ?, ?, ?, ?, ?)"); PreparedStatement nfsQuery = @@ -1766,8 +1704,8 @@ private void migrateSecondaryStorageToImageStore(Connection conn) { PreparedStatement pstmtUpdateHostAsRemoved = conn.prepareStatement(sqlUpdateHostAsRemoved); ResultSet rsSelectS3Count = pstmtSelectS3Count.executeQuery(); ResultSet rsSelectSwiftCount = pstmtSelectSwiftCount.executeQuery(); - ResultSet rsNfs = nfsQuery.executeQuery(); - ) { + ResultSet rsNfs = nfsQuery.executeQuery() + ) { logger.debug("Checking if we need to migrate NFS secondary storage to image store or staging store"); int numRows = 0; if (rsSelectS3Count.next()) { @@ -1786,11 +1724,11 @@ private void migrateSecondaryStorageToImageStore(Connection conn) { store_role = "ImageCache"; } - logger.debug("Migrating NFS secondary storage to " + store_role + " store"); + logger.debug("Migrating NFS secondary storage to {} store", store_role); // migrate NFS secondary storage, for nfs, keep previous host_id as the store_id while (rsNfs.next()) { - Long nfs_id = rsNfs.getLong("id"); + long nfs_id = rsNfs.getLong("id"); String nfs_uuid = rsNfs.getString("uuid"); String nfs_url = rsNfs.getString("url"); String nfs_parent = rsNfs.getString("parent"); @@ -1832,19 +1770,19 @@ private void migrateSecondaryStorageToImageStore(Connection conn) { private void migrateVolumeHostRef(Connection conn) { logger.debug("Updating volume_store_ref table from volume_host_ref table"); try(PreparedStatement volStoreInsert = - conn.prepareStatement("INSERT INTO `cloud`.`volume_store_ref` (store_id, volume_id, 
zone_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, checksum, error_str, local_path, install_path, url, destroyed, update_count, ref_cnt, state) select host_id, volume_id, zone_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, checksum, error_str, local_path, install_path, url, destroyed, 0, 0, 'Allocated' from `cloud`.`volume_host_ref`");) + conn.prepareStatement("INSERT INTO `cloud`.`volume_store_ref` (store_id, volume_id, zone_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, checksum, error_str, local_path, install_path, url, destroyed, update_count, ref_cnt, state) select host_id, volume_id, zone_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, checksum, error_str, local_path, install_path, url, destroyed, 0, 0, 'Allocated' from `cloud`.`volume_host_ref`")) { int rowCount = volStoreInsert.executeUpdate(); - logger.debug("Insert modified " + rowCount + " rows"); - try(PreparedStatement volStoreUpdate = conn.prepareStatement("update `cloud`.`volume_store_ref` set state = 'Ready' where download_state = 'DOWNLOADED'");) { + logger.debug(INSERT_MODIFIED_ROWS, rowCount); + try(PreparedStatement volStoreUpdate = conn.prepareStatement("update `cloud`.`volume_store_ref` set state = 'Ready' where download_state = 'DOWNLOADED'")) { rowCount = volStoreUpdate.executeUpdate(); - logger.debug("Update modified " + rowCount + " rows"); + logger.debug(UPDATE_MODIFIED_ROWS, rowCount); }catch (SQLException e) { - logger.error("Unable to migrate volume_host_ref." + e.getMessage(),e); + logger.error("Unable to migrate volume_host_ref.{}", e.getMessage(), e); throw new CloudRuntimeException("Unable to migrate volume_host_ref." + e.getMessage(),e); } } catch (SQLException e) { - logger.error("Unable to migrate volume_host_ref." + e.getMessage(),e); + logger.error("Unable to migrate volume_host_ref.{}", e.getMessage(), e); throw new CloudRuntimeException("Unable to migrate volume_host_ref." 
+ e.getMessage(),e); } logger.debug("Completed updating volume_store_ref table from volume_host_ref table"); @@ -1854,20 +1792,20 @@ private void migrateVolumeHostRef(Connection conn) { private void migrateTemplateHostRef(Connection conn) { logger.debug("Updating template_store_ref table from template_host_ref table"); try (PreparedStatement tmplStoreInsert = - conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, error_str, local_path, install_path, url, destroyed, is_copy, update_count, ref_cnt, store_role, state) select host_id, template_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, error_str, local_path, install_path, url, destroyed, is_copy, 0, 0, 'Image', 'Allocated' from `cloud`.`template_host_ref`");) + conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, error_str, local_path, install_path, url, destroyed, is_copy, update_count, ref_cnt, store_role, state) select host_id, template_id, created, last_updated, job_id, download_pct, size, physical_size, download_state, error_str, local_path, install_path, url, destroyed, is_copy, 0, 0, 'Image', 'Allocated' from `cloud`.`template_host_ref`")) { int rowCount = tmplStoreInsert.executeUpdate(); - logger.debug("Insert modified " + rowCount + " rows"); + logger.debug(INSERT_MODIFIED_ROWS, rowCount); - try(PreparedStatement tmplStoreUpdate = conn.prepareStatement("update `cloud`.`template_store_ref` set state = 'Ready' where download_state = 'DOWNLOADED'");) { + try(PreparedStatement tmplStoreUpdate = conn.prepareStatement("update `cloud`.`template_store_ref` set state = 'Ready' where download_state = 'DOWNLOADED'")) { rowCount = tmplStoreUpdate.executeUpdate(); }catch (SQLException e) { - logger.error("Unable to migrate template_host_ref." + e.getMessage(),e); + logger.error("Unable to migrate template_host_ref.{}", e.getMessage(), e); throw new CloudRuntimeException("Unable to migrate template_host_ref." + e.getMessage(), e); } - logger.debug("Update modified " + rowCount + " rows"); + logger.debug(UPDATE_MODIFIED_ROWS, rowCount); } catch (SQLException e) { - logger.error("Unable to migrate template_host_ref." + e.getMessage(),e); + logger.error("Unable to migrate template_host_ref.{}", e.getMessage(), e); throw new CloudRuntimeException("Unable to migrate template_host_ref." 
+ e.getMessage(), e); } logger.debug("Completed updating template_store_ref table from template_host_ref table"); @@ -1877,22 +1815,22 @@ private void migrateTemplateHostRef(Connection conn) { private void migrateSnapshotStoreRef(Connection conn) { logger.debug("Updating snapshot_store_ref table from snapshots table"); try(PreparedStatement snapshotStoreInsert = - conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) select sechost_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and sechost_id is not null and removed is null"); + conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) select sechost_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and sechost_id is not null and removed is null") ) { //Update all snapshots except KVM snapshots int rowCount = snapshotStoreInsert.executeUpdate(); - logger.debug("Inserted " + rowCount + " snapshots into snapshot_store_ref"); + logger.debug("Inserted {} snapshots into snapshot_store_ref", rowCount); //backsnap_id for KVM snapshots is complete path. CONCAT is not required try(PreparedStatement snapshotStoreInsert_2 = - conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) select sechost_id, id, created, size, prev_snap_id, backup_snap_id, volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type = 'KVM' and sechost_id is not null and removed is null");) { + conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) select sechost_id, id, created, size, prev_snap_id, backup_snap_id, volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type = 'KVM' and sechost_id is not null and removed is null")) { rowCount = snapshotStoreInsert_2.executeUpdate(); - logger.debug("Inserted " + rowCount + " KVM snapshots into snapshot_store_ref"); + logger.debug("Inserted {} KVM snapshots into snapshot_store_ref", rowCount); }catch (SQLException e) { - logger.error("Unable to migrate snapshot_store_ref." + e.getMessage(),e); + logger.error("Unable to migrate snapshot_store_ref.{}", e.getMessage(), e); throw new CloudRuntimeException("Unable to migrate snapshot_store_ref." + e.getMessage(),e); } } catch (SQLException e) { - logger.error("Unable to migrate snapshot_store_ref." + e.getMessage(),e); + logger.error("Unable to migrate snapshot_store_ref.{}", e.getMessage(), e); throw new CloudRuntimeException("Unable to migrate snapshot_store_ref." 
+ e.getMessage(),e); } logger.debug("Completed updating snapshot_store_ref table from snapshots table"); @@ -1901,7 +1839,7 @@ private void migrateSnapshotStoreRef(Connection conn) { // migrate secondary storages S3 from s3 tables to image_store table private void migrateS3ToImageStore(Connection conn) { Long storeId = null; - Map s3_store_id_map = new HashMap(); + Map s3_store_id_map = new HashMap<>(); logger.debug("Migrating S3 to image store"); try ( @@ -1913,8 +1851,8 @@ private void migrateS3ToImageStore(Connection conn) { "values(?, ?, 'S3', ?, 'REGION', 'Image', ?)"); PreparedStatement s3Query = conn.prepareStatement("select id, uuid, access_key, secret_key, end_point, bucket, https, connection_timeout, " + "max_error_retry, socket_timeout, created from `cloud`.`s3`"); - ResultSet rs = s3Query.executeQuery(); - ) { + ResultSet rs = s3Query.executeQuery() + ) { while (rs.next()) { Long s3_id = rs.getLong("id"); @@ -1923,7 +1861,7 @@ private void migrateS3ToImageStore(Connection conn) { String s3_secretkey = rs.getString("secret_key"); String s3_endpoint = rs.getString("end_point"); String s3_bucket = rs.getString("bucket"); - boolean s3_https = rs.getObject("https") != null ? (rs.getInt("https") == 0 ? false : true) : false; + boolean s3_https = rs.getObject("https") != null && (rs.getInt("https") != 0); Integer s3_connectiontimeout = rs.getObject("connection_timeout") != null ? rs.getInt("connection_timeout") : null; Integer s3_retry = rs.getObject("max_error_retry") != null ? rs.getInt("max_error_retry") : null; Integer s3_sockettimeout = rs.getObject("socket_timeout") != null ? rs.getInt("socket_timeout") : null; @@ -1939,13 +1877,13 @@ private void migrateS3ToImageStore(Connection conn) { storeInsert.executeUpdate(); storeQuery.setString(1, s3_uuid); - try (ResultSet storeInfo = storeQuery.executeQuery();) { + try (ResultSet storeInfo = storeQuery.executeQuery()) { if (storeInfo.next()) { storeId = storeInfo.getLong("id"); } } - Map detailMap = new HashMap(); + Map detailMap = new HashMap<>(); detailMap.put(ApiConstants.S3_ACCESS_KEY, s3_accesskey); detailMap.put(ApiConstants.S3_SECRET_KEY, s3_secretkey); detailMap.put(ApiConstants.S3_BUCKET_NAME, s3_bucket); @@ -1961,9 +1899,7 @@ private void migrateS3ToImageStore(Connection conn) { detailMap.put(ApiConstants.S3_SOCKET_TIMEOUT, String.valueOf(s3_sockettimeout)); } - Iterator keyIt = detailMap.keySet().iterator(); - while (keyIt.hasNext()) { - String key = keyIt.next(); + for (String key : detailMap.keySet()) { String val = detailMap.get(key); storeDetailInsert.setLong(1, storeId); storeDetailInsert.setString(2, key); @@ -1991,18 +1927,18 @@ private void migrateS3ToImageStore(Connection conn) { private void migrateTemplateS3Ref(Connection conn, Map s3StoreMap) { logger.debug("Updating template_store_ref table from template_s3_ref table"); try(PreparedStatement tmplStoreInsert = - conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, download_pct, size, physical_size, download_state, local_path, install_path, update_count, ref_cnt, store_role, state) values(?, ?, ?, 100, ?, ?, 'DOWNLOADED', '?', '?', 0, 0, 'Image', 'Ready')"); + conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, download_pct, size, physical_size, download_state, local_path, install_path, update_count, ref_cnt, store_role, state) values(?, ?, ?, 100, ?, ?, 'DOWNLOADED', '?', '?', 0, 0, 'Image', 'Ready')") ) { try(PreparedStatement s3Query = - conn.prepareStatement("select 
template_s3_ref.s3_id, template_s3_ref.template_id, template_s3_ref.created, template_s3_ref.size, template_s3_ref.physical_size, vm_template.account_id from `cloud`.`template_s3_ref`, `cloud`.`vm_template` where vm_template.id = template_s3_ref.template_id");) { - try(ResultSet rs = s3Query.executeQuery();) { + conn.prepareStatement("select template_s3_ref.s3_id, template_s3_ref.template_id, template_s3_ref.created, template_s3_ref.size, template_s3_ref.physical_size, vm_template.account_id from `cloud`.`template_s3_ref`, `cloud`.`vm_template` where vm_template.id = template_s3_ref.template_id")) { + try(ResultSet rs = s3Query.executeQuery()) { while (rs.next()) { Long s3_id = rs.getLong("s3_id"); - Long s3_tmpl_id = rs.getLong("template_id"); + long s3_tmpl_id = rs.getLong("template_id"); Date s3_created = rs.getDate("created"); Long s3_size = rs.getObject("size") != null ? rs.getLong("size") : null; Long s3_psize = rs.getObject("physical_size") != null ? rs.getLong("physical_size") : null; - Long account_id = rs.getLong("account_id"); + long account_id = rs.getLong("account_id"); tmplStoreInsert.setLong(1, s3StoreMap.get(s3_id)); tmplStoreInsert.setLong(2, s3_tmpl_id); tmplStoreInsert.setDate(3, s3_created); @@ -2022,15 +1958,15 @@ private void migrateTemplateS3Ref(Connection conn, Map s3StoreMap) { tmplStoreInsert.executeUpdate(); } }catch (SQLException e) { - logger.error("Unable to migrate template_s3_ref." + e.getMessage(),e); + logger.error("Unable to migrate template_s3_ref.{}", e.getMessage(), e); throw new CloudRuntimeException("Unable to migrate template_s3_ref." + e.getMessage(),e); } }catch (SQLException e) { - logger.error("Unable to migrate template_s3_ref." + e.getMessage(),e); + logger.error("Unable to migrate template_s3_ref.{}", e.getMessage(), e); throw new CloudRuntimeException("Unable to migrate template_s3_ref." + e.getMessage(),e); } } catch (SQLException e) { - logger.error("Unable to migrate template_s3_ref." + e.getMessage(),e); + logger.error("Unable to migrate template_s3_ref.{}", e.getMessage(), e); throw new CloudRuntimeException("Unable to migrate template_s3_ref." 
+ e.getMessage(),e); } logger.debug("Completed migrating template_s3_ref table."); @@ -2040,19 +1976,19 @@ private void migrateTemplateS3Ref(Connection conn, Map s3StoreMap) { private void migrateSnapshotS3Ref(Connection conn, Map s3StoreMap) { logger.debug("Updating snapshot_store_ref table from snapshots table for s3"); try(PreparedStatement snapshotStoreInsert = - conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) values(?, ?, ?, ?, ?, ?, ?, 0, 0, 'Image', 'Ready')"); + conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) values(?, ?, ?, ?, ?, ?, ?, 0, 0, 'Image', 'Ready')") ) { try(PreparedStatement s3Query = - conn.prepareStatement("select s3_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and s3_id is not null and removed is null");) { - try(ResultSet rs = s3Query.executeQuery();) { + conn.prepareStatement("select s3_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and s3_id is not null and removed is null")) { + try(ResultSet rs = s3Query.executeQuery()) { while (rs.next()) { Long s3_id = rs.getLong("s3_id"); - Long snapshot_id = rs.getLong("id"); + long snapshot_id = rs.getLong("id"); Date s3_created = rs.getDate("created"); Long s3_size = rs.getObject("size") != null ? rs.getLong("size") : null; Long s3_prev_id = rs.getObject("prev_snap_id") != null ? rs.getLong("prev_snap_id") : null; String install_path = rs.getString(6); - Long s3_vol_id = rs.getLong("volume_id"); + long s3_vol_id = rs.getLong("volume_id"); snapshotStoreInsert.setLong(1, s3StoreMap.get(s3_id)); snapshotStoreInsert.setLong(2, snapshot_id); @@ -2072,15 +2008,15 @@ private void migrateSnapshotS3Ref(Connection conn, Map s3StoreMap) { snapshotStoreInsert.executeUpdate(); } }catch (SQLException e) { - logger.error("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e); + logger.error("migrateSnapshotS3Ref:Exception:{}", e.getMessage(), e); throw new CloudRuntimeException("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e); } }catch (SQLException e) { - logger.error("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e); + logger.error("migrateSnapshotS3Ref:Exception:{}", e.getMessage(), e); throw new CloudRuntimeException("migrateSnapshotS3Ref:Exception:"+e.getMessage(),e); } } catch (SQLException e) { - logger.error("Unable to migrate s3 backedup snapshots to snapshot_store_ref." + e.getMessage()); + logger.error("Unable to migrate s3 backedup snapshots to snapshot_store_ref.{}", e.getMessage()); throw new CloudRuntimeException("Unable to migrate s3 backedup snapshots to snapshot_store_ref." 
+ e.getMessage(), e); } logger.debug("Completed updating snapshot_store_ref table from s3 snapshots entries"); @@ -2089,7 +2025,7 @@ private void migrateSnapshotS3Ref(Connection conn, Map s3StoreMap) { // migrate secondary storages Swift from swift tables to image_store table private void migrateSwiftToImageStore(Connection conn) { Long storeId = null; - Map swift_store_id_map = new HashMap(); + Map swift_store_id_map = new HashMap<>(); logger.debug("Migrating Swift to image store"); try ( @@ -2100,8 +2036,8 @@ private void migrateSwiftToImageStore(Connection conn) { PreparedStatement storeInsert = conn.prepareStatement("INSERT INTO `cloud`.`image_store` (uuid, name, image_provider_name, protocol, url, scope, role, created) values(?, ?, 'Swift', 'http', ?, 'REGION', 'Image', ?)"); PreparedStatement swiftQuery = conn.prepareStatement("select id, uuid, url, account, username, swift.key, created from `cloud`.`swift`"); - ResultSet rs = swiftQuery.executeQuery(); - ) { + ResultSet rs = swiftQuery.executeQuery() + ) { while (rs.next()) { Long swift_id = rs.getLong("id"); String swift_uuid = rs.getString("uuid"); @@ -2120,20 +2056,18 @@ private void migrateSwiftToImageStore(Connection conn) { storeInsert.executeUpdate(); storeQuery.setString(1, swift_uuid); - try (ResultSet storeInfo = storeQuery.executeQuery();) { + try (ResultSet storeInfo = storeQuery.executeQuery()) { if (storeInfo.next()) { storeId = storeInfo.getLong("id"); } } - Map detailMap = new HashMap(); + Map detailMap = new HashMap<>(); detailMap.put(ApiConstants.ACCOUNT, swift_account); detailMap.put(ApiConstants.USERNAME, swift_username); detailMap.put(ApiConstants.KEY, swift_key); - Iterator keyIt = detailMap.keySet().iterator(); - while (keyIt.hasNext()) { - String key = keyIt.next(); + for (String key : detailMap.keySet()) { String val = detailMap.get(key); storeDetailInsert.setLong(1, storeId); storeDetailInsert.setString(2, key); @@ -2164,11 +2098,11 @@ private void migrateTemplateSwiftRef(Connection conn, Map swiftStore PreparedStatement tmplStoreInsert = conn.prepareStatement("INSERT INTO `cloud`.`template_store_ref` (store_id, template_id, created, download_pct, size, physical_size, download_state, local_path, install_path, update_count, ref_cnt, store_role, state) values(?, ?, ?, 100, ?, ?, 'DOWNLOADED', '?', '?', 0, 0, 'Image', 'Ready')"); PreparedStatement s3Query = conn.prepareStatement("select swift_id, template_id, created, path, size, physical_size from `cloud`.`template_swift_ref`"); - ResultSet rs = s3Query.executeQuery(); - ) { + ResultSet rs = s3Query.executeQuery() + ) { while (rs.next()) { Long swift_id = rs.getLong("swift_id"); - Long tmpl_id = rs.getLong("template_id"); + long tmpl_id = rs.getLong("template_id"); Date created = rs.getDate("created"); String path = rs.getString("path"); Long size = rs.getObject("size") != null ? 
rs.getLong("size") : null; @@ -2203,19 +2137,19 @@ private void migrateTemplateSwiftRef(Connection conn, Map swiftStore private void migrateSnapshotSwiftRef(Connection conn, Map swiftStoreMap) { logger.debug("Updating snapshot_store_ref table from snapshots table for swift"); try (PreparedStatement snapshotStoreInsert = - conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) values(?, ?, ?, ?, ?, ?, ?, 0, 0, 'Image', 'Ready')"); + conn.prepareStatement("INSERT INTO `cloud`.`snapshot_store_ref` (store_id, snapshot_id, created, size, parent_snapshot_id, install_path, volume_id, update_count, ref_cnt, store_role, state) values(?, ?, ?, ?, ?, ?, ?, 0, 0, 'Image', 'Ready')") ){ try(PreparedStatement s3Query = - conn.prepareStatement("select swift_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and swift_id is not null and removed is null");) { - try(ResultSet rs = s3Query.executeQuery();) { + conn.prepareStatement("select swift_id, id, created, size, prev_snap_id, CONCAT('snapshots', '/', account_id, '/', volume_id, '/', backup_snap_id), volume_id, 0, 0, 'Image', 'Ready' from `cloud`.`snapshots` where status = 'BackedUp' and hypervisor_type <> 'KVM' and swift_id is not null and removed is null")) { + try(ResultSet rs = s3Query.executeQuery()) { while (rs.next()) { Long swift_id = rs.getLong("swift_id"); - Long snapshot_id = rs.getLong("id"); + long snapshot_id = rs.getLong("id"); Date created = rs.getDate("created"); - Long size = rs.getLong("size"); - Long prev_id = rs.getLong("prev_snap_id"); + long size = rs.getLong("size"); + long prev_id = rs.getLong("prev_snap_id"); String install_path = rs.getString(6); - Long vol_id = rs.getLong("volume_id"); + long vol_id = rs.getLong("volume_id"); snapshotStoreInsert.setLong(1, swiftStoreMap.get(swift_id)); snapshotStoreInsert.setLong(2, snapshot_id); @@ -2227,15 +2161,15 @@ private void migrateSnapshotSwiftRef(Connection conn, Map swiftStore snapshotStoreInsert.executeUpdate(); } }catch (SQLException e) { - logger.error("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e); + logger.error("migrateSnapshotSwiftRef:Exception:{}", e.getMessage(), e); throw new CloudRuntimeException("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e); } }catch (SQLException e) { - logger.error("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e); + logger.error("migrateSnapshotSwiftRef:Exception:{}", e.getMessage(), e); throw new CloudRuntimeException("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e); } } catch (SQLException e) { - logger.error("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e); + logger.error("migrateSnapshotSwiftRef:Exception:{}", e.getMessage(), e); throw new CloudRuntimeException("migrateSnapshotSwiftRef:Exception:"+e.getMessage(),e); } logger.debug("Completed updating snapshot_store_ref table from swift snapshots entries"); @@ -2243,12 +2177,12 @@ private void migrateSnapshotSwiftRef(Connection conn, Map swiftStore private void fixNiciraKeys(Connection conn) { //First drop the key if it exists. 
- List keys = new ArrayList(); + List keys = new ArrayList<>(); logger.debug("Dropping foreign key fk_nicira_nvp_nic_map__nic from the table nicira_nvp_nic_map if it exists"); keys.add("fk_nicira_nvp_nic_map__nic"); DbUpgradeUtils.dropKeysIfExist(conn, "nicira_nvp_nic_map", keys, true); //Now add foreign key. - try(PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`nicira_nvp_nic_map` ADD CONSTRAINT `fk_nicira_nvp_nic_map__nic` FOREIGN KEY (`nic`) REFERENCES `nics` (`uuid`) ON DELETE CASCADE");) + try(PreparedStatement pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`nicira_nvp_nic_map` ADD CONSTRAINT `fk_nicira_nvp_nic_map__nic` FOREIGN KEY (`nic`) REFERENCES `nics` (`uuid`) ON DELETE CASCADE")) { pstmt.executeUpdate(); logger.debug("Added foreign key fk_nicira_nvp_nic_map__nic to the table nicira_nvp_nic_map"); @@ -2259,13 +2193,13 @@ private void fixNiciraKeys(Connection conn) { private void fixRouterKeys(Connection conn) { //First drop the key if it exists. - List keys = new ArrayList(); + List keys = new ArrayList<>(); logger.debug("Dropping foreign key fk_router_network_ref__router_id from the table router_network_ref if it exists"); keys.add("fk_router_network_ref__router_id"); DbUpgradeUtils.dropKeysIfExist(conn, "router_network_ref", keys, true); //Now add foreign key. try (PreparedStatement pstmt = - conn.prepareStatement("ALTER TABLE `cloud`.`router_network_ref` ADD CONSTRAINT `fk_router_network_ref__router_id` FOREIGN KEY (`router_id`) REFERENCES `domain_router` (`id`) ON DELETE CASCADE");) + conn.prepareStatement("ALTER TABLE `cloud`.`router_network_ref` ADD CONSTRAINT `fk_router_network_ref__router_id` FOREIGN KEY (`router_id`) REFERENCES `domain_router` (`id`) ON DELETE CASCADE")) { pstmt.executeUpdate(); logger.debug("Added foreign key fk_router_network_ref__router_id to the table router_network_ref"); @@ -2276,8 +2210,8 @@ private void fixRouterKeys(Connection conn) { private void encryptSite2SitePSK(Connection conn) { logger.debug("Encrypting Site2Site Customer Gateway pre-shared key"); - try (PreparedStatement select_pstmt = conn.prepareStatement("select id, ipsec_psk from `cloud`.`s2s_customer_gateway`");){ - try(ResultSet rs = select_pstmt.executeQuery();) + try (PreparedStatement select_pstmt = conn.prepareStatement("select id, ipsec_psk from `cloud`.`s2s_customer_gateway`")){ + try(ResultSet rs = select_pstmt.executeQuery()) { while (rs.next()) { long id = rs.getLong(1); @@ -2286,7 +2220,7 @@ private void encryptSite2SitePSK(Connection conn) { continue; } String encryptedValue = DBEncryptionUtil.encrypt(value); - try(PreparedStatement update_pstmt = conn.prepareStatement("update `cloud`.`s2s_customer_gateway` set ipsec_psk=? where id=?");) { + try(PreparedStatement update_pstmt = conn.prepareStatement("update `cloud`.`s2s_customer_gateway` set ipsec_psk=? 
where id=?")) { update_pstmt.setBytes(1, encryptedValue.getBytes("UTF-8")); update_pstmt.setLong(2, id); update_pstmt.executeUpdate(); @@ -2297,9 +2231,7 @@ private void encryptSite2SitePSK(Connection conn) { }catch (SQLException e) { throw new CloudRuntimeException("encryptSite2SitePSK:Exception:"+e.getMessage(), e); } - } catch (SQLException e) { - throw new CloudRuntimeException("Unable to encrypt Site2Site Customer Gateway pre-shared key ", e); - } catch (UnsupportedEncodingException e) { + } catch (SQLException | UnsupportedEncodingException e) { throw new CloudRuntimeException("Unable to encrypt Site2Site Customer Gateway pre-shared key ", e); } logger.debug("Done encrypting Site2Site Customer Gateway pre-shared key"); @@ -2308,12 +2240,12 @@ private void encryptSite2SitePSK(Connection conn) { protected void updateConcurrentConnectionsInNetworkOfferings(Connection conn) { try { try (PreparedStatement sel_pstmt = - conn.prepareStatement("SELECT * FROM information_schema.COLUMNS WHERE TABLE_SCHEMA = 'cloud' AND TABLE_NAME = 'network_offerings' AND COLUMN_NAME = 'concurrent_connections'");) + conn.prepareStatement("SELECT * FROM information_schema.COLUMNS WHERE TABLE_SCHEMA = 'cloud' AND TABLE_NAME = 'network_offerings' AND COLUMN_NAME = 'concurrent_connections'")) { - try(ResultSet rs = sel_pstmt.executeQuery();) { + try(ResultSet rs = sel_pstmt.executeQuery()) { if (!rs.next()) { try(PreparedStatement alter_pstmt = - conn.prepareStatement("ALTER TABLE `cloud`.`network_offerings` ADD COLUMN `concurrent_connections` int(10) unsigned COMMENT 'Load Balancer(haproxy) maximum number of concurrent connections(global max)'");) { + conn.prepareStatement("ALTER TABLE `cloud`.`network_offerings` ADD COLUMN `concurrent_connections` int(10) unsigned COMMENT 'Load Balancer(haproxy) maximum number of concurrent connections(global max)'")) { alter_pstmt.executeUpdate(); }catch (SQLException e) { throw new CloudRuntimeException("migration of concurrent connections from network_details failed"); @@ -2325,23 +2257,23 @@ protected void updateConcurrentConnectionsInNetworkOfferings(Connection conn) { } catch (SQLException e) { throw new CloudRuntimeException("migration of concurrent connections from network_details failed"); } - try(PreparedStatement sel_net_pstmt = conn.prepareStatement("select network_id, value from `cloud`.`network_details` where name='maxconnections'");) + try(PreparedStatement sel_net_pstmt = conn.prepareStatement("select network_id, value from `cloud`.`network_details` where name='maxconnections'")) { - try(ResultSet rs = sel_net_pstmt.executeQuery();) { + try(ResultSet rs = sel_net_pstmt.executeQuery()) { while (rs.next()) { long networkId = rs.getLong(1); int maxconnections = Integer.parseInt(rs.getString(2)); - try(PreparedStatement sel_net_off_pstmt = conn.prepareStatement("select network_offering_id from `cloud`.`networks` where id= ?");) { + try(PreparedStatement sel_net_off_pstmt = conn.prepareStatement("select network_offering_id from `cloud`.`networks` where id= ?")) { sel_net_off_pstmt.setLong(1, networkId); - try(ResultSet rs1 = sel_net_off_pstmt.executeQuery();) { + try(ResultSet rs1 = sel_net_off_pstmt.executeQuery()) { if (rs1.next()) { long network_offering_id = rs1.getLong(1); - try(PreparedStatement pstmt = conn.prepareStatement("select concurrent_connections from `cloud`.`network_offerings` where id= ?");) + try(PreparedStatement pstmt = conn.prepareStatement("select concurrent_connections from `cloud`.`network_offerings` where id= ?")) { pstmt.setLong(1, 
network_offering_id); - try(ResultSet rs2 = pstmt.executeQuery();) { + try(ResultSet rs2 = pstmt.executeQuery()) { if ((!rs2.next()) || (rs2.getInt(1) < maxconnections)) { - try(PreparedStatement update_net_pstmt = conn.prepareStatement("update network_offerings set concurrent_connections=? where id=?");) + try(PreparedStatement update_net_pstmt = conn.prepareStatement("update network_offerings set concurrent_connections=? where id=?")) { update_net_pstmt.setInt(1, maxconnections); update_net_pstmt.setLong(2, network_offering_id); @@ -2376,24 +2308,24 @@ protected void updateConcurrentConnectionsInNetworkOfferings(Connection conn) { } private void migrateDatafromIsoIdInVolumesTable(Connection conn) { - try(PreparedStatement pstmt = conn.prepareStatement("SELECT iso_id1 From `cloud`.`volumes`");) + try(PreparedStatement pstmt = conn.prepareStatement("SELECT iso_id1 From `cloud`.`volumes`")) { - try(ResultSet rs = pstmt.executeQuery();) { + try(ResultSet rs = pstmt.executeQuery()) { if (rs.next()) { - try(PreparedStatement alter_pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`volumes` DROP COLUMN `iso_id`");) { + try(PreparedStatement alter_pstmt = conn.prepareStatement("ALTER TABLE `cloud`.`volumes` DROP COLUMN `iso_id`")) { alter_pstmt.executeUpdate(); try(PreparedStatement alter_iso_pstmt = - conn.prepareStatement("ALTER TABLE `cloud`.`volumes` CHANGE COLUMN `iso_id1` `iso_id` bigint(20) unsigned COMMENT 'The id of the iso from which the volume was created'");) { + conn.prepareStatement("ALTER TABLE `cloud`.`volumes` CHANGE COLUMN `iso_id1` `iso_id` bigint(20) unsigned COMMENT 'The id of the iso from which the volume was created'")) { alter_iso_pstmt.executeUpdate(); }catch (SQLException e) { - logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: " + e.getMessage()); + logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: {}", e.getMessage()); if (logger.isTraceEnabled()) { logger.trace("migrateDatafromIsoIdInVolumesTable: ignored Exception",e); } //implies iso_id1 is not present, so do nothing. } }catch (SQLException e) { - logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: " + e.getMessage()); + logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: {}", e.getMessage()); if (logger.isTraceEnabled()) { logger.trace("migrateDatafromIsoIdInVolumesTable: ignored Exception",e); } @@ -2401,14 +2333,14 @@ private void migrateDatafromIsoIdInVolumesTable(Connection conn) { } } }catch (SQLException e) { - logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: " + e.getMessage()); + logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: {}", e.getMessage()); if (logger.isTraceEnabled()) { logger.trace("migrateDatafromIsoIdInVolumesTable: ignored Exception",e); } //implies iso_id1 is not present, so do nothing. 
} } catch (SQLException e) { - logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: " + e.getMessage()); + logger.info("migrateDatafromIsoIdInVolumesTable: ignoring Exception: {}", e.getMessage()); if (logger.isTraceEnabled()) { logger.trace("migrateDatafromIsoIdInVolumesTable: ignored Exception",e); } @@ -2417,7 +2349,7 @@ private void migrateDatafromIsoIdInVolumesTable(Connection conn) { } protected void setRAWformatForRBDVolumes(Connection conn) { - try(PreparedStatement pstmt = conn.prepareStatement("UPDATE volumes SET format = 'RAW' WHERE pool_id IN(SELECT id FROM storage_pool WHERE pool_type = 'RBD')");) + try(PreparedStatement pstmt = conn.prepareStatement("UPDATE volumes SET format = 'RAW' WHERE pool_id IN(SELECT id FROM storage_pool WHERE pool_type = 'RBD')")) { logger.debug("Setting format to RAW for all volumes on RBD primary storage pools"); pstmt.executeUpdate(); @@ -2428,23 +2360,23 @@ protected void setRAWformatForRBDVolumes(Connection conn) { private void upgradeVpcServiceMap(Connection conn) { logger.debug("Upgrading VPC service Map"); - try(PreparedStatement listVpc = conn.prepareStatement("SELECT id, vpc_offering_id FROM `cloud`.`vpc` where removed is NULL");) + try(PreparedStatement listVpc = conn.prepareStatement("SELECT id, vpc_offering_id FROM `cloud`.`vpc` where removed is NULL")) { //Get all vpc Ids along with vpc offering Id - try(ResultSet rs = listVpc.executeQuery();) { + try(ResultSet rs = listVpc.executeQuery()) { while (rs.next()) { long vpc_id = rs.getLong(1); long offering_id = rs.getLong(2); //list all services and providers in offering - try(PreparedStatement listServiceProviders = conn.prepareStatement("SELECT service, provider FROM `cloud`.`vpc_offering_service_map` where vpc_offering_id = ?");) { + try(PreparedStatement listServiceProviders = conn.prepareStatement("SELECT service, provider FROM `cloud`.`vpc_offering_service_map` where vpc_offering_id = ?")) { listServiceProviders.setLong(1, offering_id); - try(ResultSet rs1 = listServiceProviders.executeQuery();) { + try(ResultSet rs1 = listServiceProviders.executeQuery()) { //Insert entries in vpc_service_map while (rs1.next()) { String service = rs1.getString(1); String provider = rs1.getString(2); try (PreparedStatement insertProviders = - conn.prepareStatement("INSERT INTO `cloud`.`vpc_service_map` (`vpc_id`, `service`, `provider`, `created`) VALUES (?, ?, ?, now());");) { + conn.prepareStatement("INSERT INTO `cloud`.`vpc_service_map` (`vpc_id`, `service`, `provider`, `created`) VALUES (?, ?, ?, now());")) { insertProviders.setLong(1, vpc_id); insertProviders.setString(2, service); insertProviders.setString(3, provider); @@ -2459,7 +2391,7 @@ private void upgradeVpcServiceMap(Connection conn) { }catch (SQLException e) { throw new CloudRuntimeException("Error during VPC service map upgrade", e); } - logger.debug("Upgraded service map for VPC: " + vpc_id); + logger.debug("Upgraded service map for VPC: {}", vpc_id); } } } catch (SQLException e) { @@ -2471,8 +2403,8 @@ private void upgradeResourceCount(Connection conn) { logger.debug("upgradeResourceCount start"); try( PreparedStatement sel_dom_pstmt = conn.prepareStatement("select id, domain_id FROM `cloud`.`account` where removed is NULL "); - ResultSet rsAccount = sel_dom_pstmt.executeQuery(); - ) { + ResultSet rsAccount = sel_dom_pstmt.executeQuery() + ) { while (rsAccount.next()) { long account_id = rsAccount.getLong(1); long domain_id = rsAccount.getLong(2); @@ -2480,9 +2412,9 @@ private void upgradeResourceCount(Connection conn) 
{ try(PreparedStatement sel_sum_pstmt = conn.prepareStatement("SELECT SUM(service_offering.cpu), SUM(service_offering.ram_size)" + " FROM `cloud`.`vm_instance`, `cloud`.`service_offering`" + " WHERE vm_instance.service_offering_id = service_offering.id AND vm_instance.account_id = ?" + " AND vm_instance.removed is NULL" - + " AND vm_instance.vm_type='User' AND state not in ('Destroyed', 'Error', 'Expunging')");) { + + " AND vm_instance.vm_type='User' AND state not in ('Destroyed', 'Error', 'Expunging')")) { sel_sum_pstmt.setLong(1, account_id); - try(ResultSet sel_sum_pstmt_res = sel_sum_pstmt.executeQuery();) { + try(ResultSet sel_sum_pstmt_res = sel_sum_pstmt.executeQuery()) { if (sel_sum_pstmt_res.next()) { upgradeResourceCountforAccount(conn, account_id, domain_id, "cpu", sel_sum_pstmt_res.getLong(1)); upgradeResourceCountforAccount(conn, account_id, domain_id, "memory", sel_sum_pstmt_res.getLong(2)); @@ -2494,9 +2426,9 @@ private void upgradeResourceCount(Connection conn) { try(PreparedStatement sel_cloud_vol_pstmt = conn.prepareStatement("SELECT sum(size) FROM `cloud`.`volumes` WHERE account_id= ?" + " AND (path is not NULL OR state in ('Allocated')) AND removed is NULL" - + " AND instance_id IN (SELECT id FROM `cloud`.`vm_instance` WHERE vm_type='User')");) { + + " AND instance_id IN (SELECT id FROM `cloud`.`vm_instance` WHERE vm_type='User')")) { sel_cloud_vol_pstmt.setLong(1, account_id); - try(ResultSet sel_cloud_vol_count = sel_cloud_vol_pstmt.executeQuery();) { + try(ResultSet sel_cloud_vol_count = sel_cloud_vol_pstmt.executeQuery()) { if (sel_cloud_vol_count.next()) { upgradeResourceCountforAccount(conn, account_id, domain_id, "primary_storage", sel_cloud_vol_count.getLong(1)); } else { @@ -2514,24 +2446,24 @@ private void upgradeResourceCount(Connection conn) { long totalTemplatesSize = 0; try(PreparedStatement sel_cloud_vol_alloc_pstmt = conn.prepareStatement("SELECT sum(size) FROM `cloud`.`volumes` WHERE account_id= ?" - + " AND path is NULL AND state not in ('Allocated') AND removed is NULL");) { + + " AND path is NULL AND state not in ('Allocated') AND removed is NULL")) { sel_cloud_vol_alloc_pstmt.setLong(1, account_id); - try(ResultSet sel_cloud_vol_res = sel_cloud_vol_alloc_pstmt.executeQuery();) { + try(ResultSet sel_cloud_vol_res = sel_cloud_vol_alloc_pstmt.executeQuery()) { if (sel_cloud_vol_res.next()) { totalVolumesSize = sel_cloud_vol_res.getLong(1); } - try(PreparedStatement sel_cloud_snapshot_pstmt = conn.prepareStatement("SELECT sum(size) FROM `cloud`.`snapshots` WHERE account_id= ? AND removed is NULL");) + try(PreparedStatement sel_cloud_snapshot_pstmt = conn.prepareStatement("SELECT sum(size) FROM `cloud`.`snapshots` WHERE account_id= ? AND removed is NULL")) { sel_cloud_snapshot_pstmt.setLong(1, account_id); - try(ResultSet sel_cloud_snapshot_res = sel_cloud_snapshot_pstmt.executeQuery();) { + try(ResultSet sel_cloud_snapshot_res = sel_cloud_snapshot_pstmt.executeQuery()) { if (sel_cloud_snapshot_res.next()) { totalSnapshotsSize = sel_cloud_snapshot_res.getLong(1); } try (PreparedStatement sel_templ_store_pstmt = conn.prepareStatement("SELECT sum(template_store_ref.size) FROM `cloud`.`template_store_ref`,`cloud`.`vm_template` WHERE account_id = ?" 
- + " AND template_store_ref.template_id = vm_template.id AND download_state = 'DOWNLOADED' AND destroyed = false AND removed is NULL");) { + + " AND template_store_ref.template_id = vm_template.id AND download_state = 'DOWNLOADED' AND destroyed = false AND removed is NULL")) { sel_templ_store_pstmt.setLong(1, account_id); - try (ResultSet templ_store_count = sel_templ_store_pstmt.executeQuery();) { + try (ResultSet templ_store_count = sel_templ_store_pstmt.executeQuery()) { if (templ_store_count.next()) { totalTemplatesSize = templ_store_count.getLong(1); } @@ -2562,13 +2494,12 @@ private void upgradeResourceCount(Connection conn) { } } // 4. upgrade cpu,memory,primary_storage,secondary_storage for domains - String resource_types[] = {"cpu", "memory", "primary_storage", "secondary_storage"}; - try(PreparedStatement sel_id_pstmt = conn.prepareStatement("select id FROM `cloud`.`domain`");) { - try(ResultSet sel_id_res = sel_id_pstmt.executeQuery();) { + String[] resource_types = {"cpu", "memory", "primary_storage", "secondary_storage"}; + try(PreparedStatement sel_id_pstmt = conn.prepareStatement("select id FROM `cloud`.`domain`")) { + try(ResultSet sel_id_res = sel_id_pstmt.executeQuery()) { while (sel_id_res.next()) { long domain_id = sel_id_res.getLong(1); - for (int count = 0; count < resource_types.length; count++) { - String resource_type = resource_types[count]; + for (String resource_type : resource_types) { upgradeResourceCountforDomain(conn, domain_id, resource_type, 0L); // reset value to 0 before statistics } } @@ -2578,22 +2509,21 @@ private void upgradeResourceCount(Connection conn) { }catch (SQLException e) { throw new CloudRuntimeException("Unable to upgrade resource count (cpu,memory,primary_storage,secondary_storage) ", e); } - for (int count = 0; count < resource_types.length; count++) { - String resource_type = resource_types[count]; - try(PreparedStatement sel_dom_id_pstmt = - conn.prepareStatement("select account.domain_id,sum(resource_count.count) from `cloud`.`account` left join `cloud`.`resource_count` on account.id=resource_count.account_id " - + "where resource_count.type=? group by account.domain_id;");) { + for (String resource_type : resource_types) { + try (PreparedStatement sel_dom_id_pstmt = + conn.prepareStatement("select account.domain_id,sum(resource_count.count) from `cloud`.`account` left join `cloud`.`resource_count` on account.id=resource_count.account_id " + + "where resource_count.type=? group by account.domain_id;")) { sel_dom_id_pstmt.setString(1, resource_type); - try(ResultSet sel_dom_res = sel_dom_id_pstmt.executeQuery();) { + try (ResultSet sel_dom_res = sel_dom_id_pstmt.executeQuery()) { while (sel_dom_res.next()) { long domain_id = sel_dom_res.getLong(1); long resource_count = sel_dom_res.getLong(2); upgradeResourceCountforDomain(conn, domain_id, resource_type, resource_count); } - }catch (SQLException e) { + } catch (SQLException e) { throw new CloudRuntimeException("Unable to upgrade resource count (cpu,memory,primary_storage,secondary_storage) ", e); } - }catch (SQLException e) { + } catch (SQLException e) { throw new CloudRuntimeException("Unable to upgrade resource count (cpu,memory,primary_storage,secondary_storage) ", e); } } @@ -2606,7 +2536,7 @@ private void upgradeResourceCount(Connection conn) { private static void upgradeResourceCountforAccount(Connection conn, Long accountId, Long domainId, String type, Long resourceCount) throws SQLException { //update or insert into resource_count table. 
try(PreparedStatement pstmt = - conn.prepareStatement("INSERT INTO `cloud`.`resource_count` (account_id, type, count) VALUES (?,?,?) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id), count=?");) { + conn.prepareStatement("INSERT INTO `cloud`.`resource_count` (account_id, type, count) VALUES (?,?,?) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id), count=?")) { pstmt.setLong(1, accountId); pstmt.setString(2, type); pstmt.setLong(3, resourceCount); @@ -2619,7 +2549,7 @@ private static void upgradeResourceCountforAccount(Connection conn, Long account private static void upgradeResourceCountforDomain(Connection conn, Long domainId, String type, Long resourceCount) throws SQLException { //update or insert into resource_count table. - try(PreparedStatement pstmt = conn.prepareStatement("INSERT INTO `cloud`.`resource_count` (domain_id, type, count) VALUES (?,?,?) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id), count=?");) { + try(PreparedStatement pstmt = conn.prepareStatement("INSERT INTO `cloud`.`resource_count` (domain_id, type, count) VALUES (?,?,?) ON DUPLICATE KEY UPDATE id=LAST_INSERT_ID(id), count=?")) { pstmt.setLong(1, domainId); pstmt.setString(2, type); pstmt.setLong(3, resourceCount); diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41500to41510.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41500to41510.java index c7295414326d..9c1b45607a09 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41500to41510.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41500to41510.java @@ -31,7 +31,6 @@ import com.cloud.hypervisor.Hypervisor; import com.cloud.utils.exception.CloudRuntimeException; -import static com.cloud.hypervisor.Hypervisor.HypervisorType.Hyperv; import static com.cloud.hypervisor.Hypervisor.HypervisorType.KVM; import static com.cloud.hypervisor.Hypervisor.HypervisorType.LXC; import static com.cloud.hypervisor.Hypervisor.HypervisorType.Ovm3; @@ -85,12 +84,8 @@ public void updateSystemVmTemplates(final Connection conn) { hypervisorsListInUse.add(KVM); } else if (type.equals(VMware)) { hypervisorsListInUse.add(VMware); - } else if (type.equals(Hyperv)) { - hypervisorsListInUse.add(Hyperv); } else if (type.equals(LXC)) { hypervisorsListInUse.add(LXC); - } else if (type.equals(Ovm3)) { - hypervisorsListInUse.add(Ovm3); } } } catch (final SQLException e) { @@ -103,7 +98,6 @@ public void updateSystemVmTemplates(final Connection conn) { put(KVM, "systemvm-kvm-4.15.1"); put(VMware, "systemvm-vmware-4.15.1"); put(XenServer, "systemvm-xenserver-4.15.1"); - put(Hyperv, "systemvm-hyperv-4.15.1"); put(LXC, "systemvm-lxc-4.15.1"); put(Ovm3, "systemvm-ovm3-4.15.1"); } @@ -114,7 +108,6 @@ public void updateSystemVmTemplates(final Connection conn) { put(KVM, "router.template.kvm"); put(VMware, "router.template.vmware"); put(XenServer, "router.template.xenserver"); - put(Hyperv, "router.template.hyperv"); put(LXC, "router.template.lxc"); put(Ovm3, "router.template.ovm3"); } @@ -125,7 +118,6 @@ public void updateSystemVmTemplates(final Connection conn) { put(KVM, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-kvm.qcow2.bz2"); put(VMware, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-vmware.ova"); put(XenServer, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-xen.vhd.bz2"); - put(Hyperv, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-hyperv.vhd.zip"); put(LXC, 
"https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-kvm.qcow2.bz2"); put(Ovm3, "https://download.cloudstack.org/systemvm/4.15/systemvmtemplate-4.15.1-ovm.raw.bz2"); } @@ -136,7 +128,6 @@ public void updateSystemVmTemplates(final Connection conn) { put(KVM, "0e9f9a7d0957c3e0a2088e41b2da2cec"); put(XenServer, "86373992740b1eca8aff8b08ebf3aea5"); put(VMware, "4006982765846d373eb3719b2fe4d720"); - put(Hyperv, "0b9514e4b6cba1f636fea2125f0f7a5f"); put(LXC, "0e9f9a7d0957c3e0a2088e41b2da2cec"); put(Ovm3, "ae3977e696b3e6c81bdcbb792d514d29"); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41810to41900.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41810to41900.java index a38382623bf5..35e706595ec1 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41810to41900.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade41810to41900.java @@ -159,7 +159,7 @@ private void modifyDateColumnNameAndCreateNewOne(Connection conn) { try (PreparedStatement pstmt = conn.prepareStatement(createNewColumn)) { pstmt.execute(); } catch (SQLException e) { - String message = String.format("Unable to crate new backups' column date due to [%s].", e.getMessage()); + String message = String.format("Unable to create new backups' column date due to [%s].", e.getMessage()); logger.error(message, e); throw new CloudRuntimeException(message, e); } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42200to42210.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42200to42210.java index c9610f7b9ff5..d4e60d799435 100644 --- a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42200to42210.java +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42200to42210.java @@ -16,6 +16,10 @@ // under the License. package com.cloud.upgrade.dao; +import com.cloud.utils.exception.CloudRuntimeException; + +import java.io.InputStream; + public class Upgrade42200to42210 extends DbUpgradeAbstractImpl implements DbUpgrade, DbUpgradeSystemVmTemplate { @Override @@ -27,4 +31,16 @@ public String[] getUpgradableVersionRange() { public String getUpgradedVersion() { return "4.22.1.0"; } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-42200to42210.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } + } diff --git a/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42210to42300.java b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42210to42300.java new file mode 100644 index 000000000000..df4743894c9d --- /dev/null +++ b/engine/schema/src/main/java/com/cloud/upgrade/dao/Upgrade42210to42300.java @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package com.cloud.upgrade.dao; + +import java.io.InputStream; + +import com.cloud.utils.exception.CloudRuntimeException; + +public class Upgrade42210to42300 extends DbUpgradeAbstractImpl implements DbUpgrade, DbUpgradeSystemVmTemplate { + + @Override + public String[] getUpgradableVersionRange() { + return new String[]{"4.22.1.0", "4.23.0.0"}; + } + + @Override + public String getUpgradedVersion() { + return "4.23.0.0"; + } + + @Override + public InputStream[] getPrepareScripts() { + final String scriptFile = "META-INF/db/schema-42210to42300.sql"; + final InputStream script = Thread.currentThread().getContextClassLoader().getResourceAsStream(scriptFile); + if (script == null) { + throw new CloudRuntimeException("Unable to find " + scriptFile); + } + + return new InputStream[] {script}; + } +} diff --git a/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java b/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java index e4fcbad6b02f..c5ca410fc530 100644 --- a/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java +++ b/engine/schema/src/main/java/com/cloud/user/UserAccountVO.java @@ -226,10 +226,6 @@ public Date getCreated() { return created; } -// public void setCreated(Date created) { -// this.created = created; -// } - @Override public Date getRemoved() { return removed; diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDaoImpl.java index 79d0b0e149ea..761053a89f0c 100644 --- a/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/UserVmDaoImpl.java @@ -782,7 +782,7 @@ public List> countVmsBySize(long dcId, int li result.add(new Ternary(rs.getInt(1), rs.getInt(2), rs.getInt(3))); } } catch (Exception e) { - logger.warn("Error counting Instances by size for dcId= " + dcId, e); + logger.warn("Error counting Instances by size for Data Center ID = " + dcId, e); } return result; } diff --git a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java index 703fabd2cab1..b4ad7d2f42d1 100755 --- a/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java +++ b/engine/schema/src/main/java/com/cloud/vm/dao/VMInstanceDaoImpl.java @@ -900,7 +900,7 @@ public Long countByZoneAndStateAndHostTag(long dcId, State state, String hostTag return rs.getLong(1); } } catch (Exception e) { - logger.warn(String.format("Error counting Instances by host tag for dcId= %s, hostTag= %s", dcId, hostTag), e); + logger.warn("Error counting Instances by host tag for dcId = {}, hostTag = {}", dcId, hostTag, e); } return 0L; } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupOfferingDetailsVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupOfferingDetailsVO.java new file mode 100644 index 000000000000..6bdf7602a9d4 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupOfferingDetailsVO.java @@ -0,0 +1,86 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or 
more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. +package org.apache.cloudstack.backup; + +import javax.persistence.Column; +import javax.persistence.Entity; +import javax.persistence.GeneratedValue; +import javax.persistence.GenerationType; +import javax.persistence.Id; +import javax.persistence.Table; + +import org.apache.cloudstack.api.ResourceDetail; + +@Entity +@Table(name = "backup_offering_details") +public class BackupOfferingDetailsVO implements ResourceDetail { + @Id + @GeneratedValue(strategy = GenerationType.IDENTITY) + @Column(name = "id") + private long id; + + @Column(name = "backup_offering_id") + private long resourceId; + + @Column(name = "name") + private String name; + + @Column(name = "value") + private String value; + + @Column(name = "display") + private boolean display = true; + + protected BackupOfferingDetailsVO() { + } + + public BackupOfferingDetailsVO(long backupOfferingId, String name, String value, boolean display) { + this.resourceId = backupOfferingId; + this.name = name; + this.value = value; + this.display = display; + } + + @Override + public long getResourceId() { + return resourceId; + } + + public void setResourceId(long backupOfferingId) { + this.resourceId = backupOfferingId; + } + + @Override + public String getName() { + return name; + } + + @Override + public String getValue() { + return value; + } + + @Override + public long getId() { + return id; + } + + @Override + public boolean isDisplay() { + return display; + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupOfferingVO.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupOfferingVO.java index d30385af575d..ebeb7d4a2d59 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupOfferingVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/BackupOfferingVO.java @@ -17,6 +17,8 @@ package org.apache.cloudstack.backup; +import org.apache.cloudstack.utils.reflectiontostringbuilderutils.ReflectionToStringBuilderUtils; + import java.util.Date; import java.util.UUID; @@ -131,4 +133,9 @@ public void setDescription(String description) { public Date getCreated() { return created; } + + @Override + public String toString() { + return String.format("Backup offering %s.", ReflectionToStringBuilderUtils.reflectOnlySelectedFields(this, "id", "name", "uuid")); + } } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDaoImpl.java index a41e4e70d339..708faeef4643 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDaoImpl.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDaoImpl.java @@ -20,6 +20,8 @@ import javax.annotation.PostConstruct; import 
javax.inject.Inject; +import com.cloud.domain.DomainVO; +import com.cloud.domain.dao.DomainDao; import org.apache.cloudstack.api.response.BackupOfferingResponse; import org.apache.cloudstack.backup.BackupOffering; import org.apache.cloudstack.backup.BackupOfferingVO; @@ -30,10 +32,16 @@ import com.cloud.utils.db.SearchBuilder; import com.cloud.utils.db.SearchCriteria; +import java.util.List; + public class BackupOfferingDaoImpl extends GenericDaoBase implements BackupOfferingDao { @Inject DataCenterDao dataCenterDao; + @Inject + BackupOfferingDetailsDao backupOfferingDetailsDao; + @Inject + DomainDao domainDao; private SearchBuilder backupPoliciesSearch; @@ -51,8 +59,9 @@ protected void init() { @Override public BackupOfferingResponse newBackupOfferingResponse(BackupOffering offering, Boolean crossZoneInstanceCreation) { - DataCenterVO zone = dataCenterDao.findById(offering.getZoneId()); + DataCenterVO zone = dataCenterDao.findById(offering.getZoneId()); + List domainIds = backupOfferingDetailsDao.findDomainIds(offering.getId()); BackupOfferingResponse response = new BackupOfferingResponse(); response.setId(offering.getUuid()); response.setName(offering.getName()); @@ -64,6 +73,18 @@ public BackupOfferingResponse newBackupOfferingResponse(BackupOffering offering, response.setZoneId(zone.getUuid()); response.setZoneName(zone.getName()); } + if (domainIds != null && !domainIds.isEmpty()) { + String domainUUIDs = domainIds.stream().map(Long::valueOf).map(domainId -> { + DomainVO domain = domainDao.findById(domainId); + return domain != null ? domain.getUuid() : ""; + }).filter(name -> !name.isEmpty()).reduce((a, b) -> a + "," + b).orElse(""); + String domainNames = domainIds.stream().map(Long::valueOf).map(domainId -> { + DomainVO domain = domainDao.findById(domainId); + return domain != null ? domain.getName() : ""; + }).filter(name -> !name.isEmpty()).reduce((a, b) -> a + "," + b).orElse(""); + response.setDomain(domainNames); + response.setDomainId(domainUUIDs); + } if (crossZoneInstanceCreation) { response.setCrossZoneInstanceCreation(true); } diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDetailsDao.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDetailsDao.java new file mode 100644 index 000000000000..390fcba1e0e7 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDetailsDao.java @@ -0,0 +1,32 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup.dao; + +import java.util.List; + +import org.apache.cloudstack.backup.BackupOfferingDetailsVO; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDao; + +import com.cloud.utils.db.GenericDao; + +public interface BackupOfferingDetailsDao extends GenericDao, ResourceDetailsDao { + List findDomainIds(final long resourceId); + List findZoneIds(final long resourceId); + String getDetail(Long backupOfferingId, String key); + List findOfferingIdsByDomainIds(List domainIds); + void updateBackupOfferingDomainIdsDetail(long backupOfferingId, List filteredDomainIds); +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDetailsDaoImpl.java b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDetailsDaoImpl.java new file mode 100644 index 000000000000..f052c93f9817 --- /dev/null +++ b/engine/schema/src/main/java/org/apache/cloudstack/backup/dao/BackupOfferingDetailsDaoImpl.java @@ -0,0 +1,101 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+package org.apache.cloudstack.backup.dao; + + +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +import com.cloud.utils.db.DB; +import com.cloud.utils.db.SearchBuilder; +import com.cloud.utils.db.SearchCriteria; +import org.apache.cloudstack.api.ApiConstants; +import org.apache.cloudstack.backup.BackupOfferingDetailsVO; +import org.apache.cloudstack.resourcedetail.ResourceDetailsDaoBase; +import org.springframework.stereotype.Component; + +@Component +public class BackupOfferingDetailsDaoImpl extends ResourceDetailsDaoBase implements BackupOfferingDetailsDao { + + @Override + public void addDetail(long resourceId, String key, String value, boolean display) { + super.addDetail(new BackupOfferingDetailsVO(resourceId, key, value, display)); + } + + @Override + public List findDomainIds(long resourceId) { + final List domainIds = new ArrayList<>(); + for (final BackupOfferingDetailsVO detail: findDetails(resourceId, ApiConstants.DOMAIN_ID)) { + final Long domainId = Long.valueOf(detail.getValue()); + if (domainId > 0) { + domainIds.add(domainId); + } + } + return domainIds; + } + + @Override + public List findZoneIds(long resourceId) { + final List zoneIds = new ArrayList<>(); + for (final BackupOfferingDetailsVO detail: findDetails(resourceId, ApiConstants.ZONE_ID)) { + final Long zoneId = Long.valueOf(detail.getValue()); + if (zoneId > 0) { + zoneIds.add(zoneId); + } + } + return zoneIds; + } + + @Override + public String getDetail(Long backupOfferingId, String key) { + String detailValue = null; + BackupOfferingDetailsVO backupOfferingDetail = findDetail(backupOfferingId, key); + if (backupOfferingDetail != null) { + detailValue = backupOfferingDetail.getValue(); + } + return detailValue; + } + + @Override + public List findOfferingIdsByDomainIds(List domainIds) { + Object[] dIds = domainIds.stream().map(s -> String.valueOf(s)).collect(Collectors.toList()).toArray(); + return findResourceIdsByNameAndValueIn("domainid", dIds); + } + + @DB + @Override + public void updateBackupOfferingDomainIdsDetail(long backupOfferingId, List filteredDomainIds) { + SearchBuilder sb = createSearchBuilder(); + List detailsVO = new ArrayList<>(); + sb.and("offeringId", sb.entity().getResourceId(), SearchCriteria.Op.EQ); + sb.and("detailName", sb.entity().getName(), SearchCriteria.Op.EQ); + sb.done(); + SearchCriteria sc = sb.create(); + sc.setParameters("offeringId", String.valueOf(backupOfferingId)); + sc.setParameters("detailName", ApiConstants.DOMAIN_ID); + remove(sc); + for (Long domainId : filteredDomainIds) { + detailsVO.add(new BackupOfferingDetailsVO(backupOfferingId, ApiConstants.DOMAIN_ID, String.valueOf(domainId), false)); + } + if (!detailsVO.isEmpty()) { + for (BackupOfferingDetailsVO detailVO : detailsVO) { + persist(detailVO); + } + } + } +} diff --git a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreVO.java b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreVO.java index d57dec8fbfd5..c475a4203a73 100644 --- a/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreVO.java +++ b/engine/schema/src/main/java/org/apache/cloudstack/storage/datastore/db/VolumeDataStoreVO.java @@ -209,10 +209,8 @@ public VolumeDataStoreVO(long hostId, long volumeId) { public VolumeDataStoreVO(long hostId, long volumeId, Date lastUpdated, int downloadPercent, Status downloadState, String localDownloadPath, String errorString, String jobId, String installPath, String 
downloadUrl, String checksum) { - // super(); dataStoreId = hostId; this.volumeId = volumeId; - // this.zoneId = zoneId; this.lastUpdated = lastUpdated; this.downloadPercent = downloadPercent; this.downloadState = downloadState; diff --git a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-common-daos-between-management-and-usage-context.xml b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-common-daos-between-management-and-usage-context.xml index d308a9e5aaf9..1846c3c62a0e 100644 --- a/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-common-daos-between-management-and-usage-context.xml +++ b/engine/schema/src/main/resources/META-INF/cloudstack/core/spring-engine-schema-core-common-daos-between-management-and-usage-context.xml @@ -71,6 +71,7 @@ - + + diff --git a/engine/schema/src/main/resources/META-INF/db/schema-307to410.sql b/engine/schema/src/main/resources/META-INF/db/schema-307to410.sql index 55d78b594377..3b0cfa8e6ce3 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-307to410.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-307to410.sql @@ -3,7 +3,7 @@ -- distributed with this work for additional information -- regarding copyright ownership. The ASF licenses this file -- to you under the Apache License, Version 2.0 (the --- "License"); you may not use this file except in compliances +-- "License"); you may not use this file except in compliance -- with the License. You may obtain a copy of the License at -- -- http://www.apache.org/licenses/LICENSE-2.0 diff --git a/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql b/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql index d62a9bb93034..1a6d6a2d66ca 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-410to420.sql @@ -2187,7 +2187,6 @@ INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'manag INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'interval.baremetal.securitygroup.agent.echo', 10, 'Interval to echo baremetal security group agent, in seconds'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'management-server', 'timeout.baremetal.securitygroup.agent.echo', 3600, 'Timeout to echo baremetal security group agent, in seconds, the provisioning process will be treated as a failure'); -INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'NetworkManager', 'router.template.hyperv', 'SystemVM Template (HyperV)', 'Name of the default router template on Hyperv.'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'NetworkManager', 'router.template.kvm', 'SystemVM Template (KVM)', 'Name of the default router template on KVM.'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'NetworkManager', 'router.template.lxc', 'SystemVM Template (LXC)', 'Name of the default router template on LXC.'); INSERT IGNORE INTO `cloud`.`configuration` VALUES ('Advanced', 'DEFAULT', 'NetworkManager', 'router.template.vmware', 'SystemVM Template (vSphere)', 'Name of the default router template on Vmware.'); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-41520to41600.sql b/engine/schema/src/main/resources/META-INF/db/schema-41520to41600.sql index 2464a8a57ce4..3b24252df0fb 100644 --- 
a/engine/schema/src/main/resources/META-INF/db/schema-41520to41600.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-41520to41600.sql @@ -24,7 +24,7 @@ ALTER TABLE `cloud`.`user_vm` ADD COLUMN `user_vm_type` varchar(255) DEFAULT "Us -- This is set, so as to ensure that the controller details from the ovf template are adhered to UPDATE `cloud`.`vm_template` set deploy_as_is = 1 where id = 8; -DELETE FROM `cloud`.`configuration` WHERE name IN ("cloud.kubernetes.cluster.template.name.kvm", "cloud.kubernetes.cluster.template.name.vmware", "cloud.kubernetes.cluster.template.name.xenserver", "cloud.kubernetes.cluster.template.name.hyperv"); +DELETE FROM `cloud`.`configuration` WHERE name IN ("cloud.kubernetes.cluster.template.name.kvm", "cloud.kubernetes.cluster.template.name.vmware", "cloud.kubernetes.cluster.template.name.xenserver"); ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `autoscaling_enabled` tinyint(1) unsigned NOT NULL DEFAULT 0; ALTER TABLE `cloud`.`kubernetes_cluster` ADD COLUMN `minsize` bigint; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42200to42210.sql b/engine/schema/src/main/resources/META-INF/db/schema-42200to42210.sql index f5543756ed61..e7b5adef8171 100644 --- a/engine/schema/src/main/resources/META-INF/db/schema-42200to42210.sql +++ b/engine/schema/src/main/resources/META-INF/db/schema-42200to42210.sql @@ -26,4 +26,15 @@ CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.usage_event','vm_id', 'b -- Add vm_id column to cloud_usage.usage_volume table CALL `cloud_usage`.`IDEMPOTENT_ADD_COLUMN`('cloud_usage.usage_volume','vm_id', 'bigint UNSIGNED NULL COMMENT "VM ID associated with the volume usage"'); +DELETE FROM `cloud`.`configuration` WHERE name = 'ucs.sync.blade.interval'; + +UPDATE `cloud`.`configuration` SET value = 'KVM,VMware,XenServer,Hyperv,BareMetal,Ovm,LXC,Ovm3,External' WHERE name = 'hypervisor.list'; +UPDATE `cloud`.`configuration` SET value = 'Hypervisor type used to create system vm, valid values are: XenServer, KVM, VMware, VirtualBox, Parralels, BareMetal, Any' WHERE name = 'system.vm.default.hypervisor'; +DELETE FROM `cloud`.`configuration` WHERE name = 'hyperv.public.network.device'; +DELETE FROM `cloud`.`configuration` WHERE name = 'hyperv.private.network.device'; +DELETE FROM `cloud`.`configuration` WHERE name = 'hyperv.guest.network.device'; + +DELETE FROM `cloud`.`configuration` WHERE name = 'router.template.hyperv'; +DELETE FROM `cloud`.`configuration` WHERE name = 'router.template.ovm3'; + ALTER TABLE `cloud`.`template_store_ref` MODIFY COLUMN `download_url` varchar(2048); diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42210to42300-cleanup.sql b/engine/schema/src/main/resources/META-INF/db/schema-42210to42300-cleanup.sql new file mode 100644 index 000000000000..e2b066af7800 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-42210to42300-cleanup.sql @@ -0,0 +1,20 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License.
You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade cleanup from 4.22.1.0 to 4.23.0.0 +--; diff --git a/engine/schema/src/main/resources/META-INF/db/schema-42210to42300.sql b/engine/schema/src/main/resources/META-INF/db/schema-42210to42300.sql new file mode 100644 index 000000000000..d330ecd0c0d5 --- /dev/null +++ b/engine/schema/src/main/resources/META-INF/db/schema-42210to42300.sql @@ -0,0 +1,51 @@ +-- Licensed to the Apache Software Foundation (ASF) under one +-- or more contributor license agreements. See the NOTICE file +-- distributed with this work for additional information +-- regarding copyright ownership. The ASF licenses this file +-- to you under the Apache License, Version 2.0 (the +-- "License"); you may not use this file except in compliance +-- with the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, +-- software distributed under the License is distributed on an +-- "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +-- KIND, either express or implied. See the License for the +-- specific language governing permissions and limitations +-- under the License. + +--; +-- Schema upgrade from 4.22.1.0 to 4.23.0.0 +--; + +CREATE TABLE `cloud`.`backup_offering_details` ( + `id` bigint unsigned NOT NULL auto_increment, + `backup_offering_id` bigint unsigned NOT NULL COMMENT 'Backup offering id', + `name` varchar(255) NOT NULL, + `value` varchar(1024) NOT NULL, + `display` tinyint(1) NOT NULL DEFAULT 1 COMMENT 'Should detail be displayed to the end user', + PRIMARY KEY (`id`), + CONSTRAINT `fk_offering_details__backup_offering_id` FOREIGN KEY `fk_offering_details__backup_offering_id`(`backup_offering_id`) REFERENCES `backup_offering`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8; + +-- Update value to random for the config 'vm.allocation.algorithm' or 'volume.allocation.algorithm' if configured as userconcentratedpod_random +-- Update value to firstfit for the config 'vm.allocation.algorithm' or 'volume.allocation.algorithm' if configured as userconcentratedpod_firstfit +UPDATE `cloud`.`configuration` SET value='random' WHERE name IN ('vm.allocation.algorithm', 'volume.allocation.algorithm') AND value='userconcentratedpod_random'; +UPDATE `cloud`.`configuration` SET value='firstfit' WHERE name IN ('vm.allocation.algorithm', 'volume.allocation.algorithm') AND value='userconcentratedpod_firstfit'; + +-- Create webhook_filter table +DROP TABLE IF EXISTS `cloud`.`webhook_filter`; +CREATE TABLE IF NOT EXISTS `cloud`.`webhook_filter` ( + `id` bigint unsigned NOT NULL AUTO_INCREMENT COMMENT 'id of the webhook filter', + `uuid` varchar(255) COMMENT 'uuid of the webhook filter', + `webhook_id` bigint unsigned NOT NULL COMMENT 'id of the webhook', + `type` varchar(20) COMMENT 'type of the filter', + `mode` varchar(20) COMMENT 'mode of the filter', + `match_type` varchar(20) COMMENT 'match type of the filter', + `value` varchar(256) NOT NULL COMMENT 'value of the filter used for matching', + `created` datetime NOT NULL COMMENT 'date created', + PRIMARY KEY (`id`), 
+ INDEX `i_webhook_filter__webhook_id`(`webhook_id`), + CONSTRAINT `fk_webhook_filter__webhook_id` FOREIGN KEY(`webhook_id`) REFERENCES `webhook`(`id`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; diff --git a/engine/schema/src/test/java/com/cloud/storage/dao/VMTemplateDaoImplTest.java b/engine/schema/src/test/java/com/cloud/storage/dao/VMTemplateDaoImplTest.java index 3c8e4c046ae7..5cff77869be8 100644 --- a/engine/schema/src/test/java/com/cloud/storage/dao/VMTemplateDaoImplTest.java +++ b/engine/schema/src/test/java/com/cloud/storage/dao/VMTemplateDaoImplTest.java @@ -76,7 +76,8 @@ public void testFindLatestTemplateByName_ReturnsTemplate() { VMTemplateVO expectedTemplate = new VMTemplateVO(); List<VMTemplateVO> returnedList = Collections.singletonList(expectedTemplate); doReturn(returnedList).when(templateDao).listBy(any(SearchCriteria.class), any(Filter.class)); - VMTemplateVO result = templateDao.findLatestTemplateByName("test", CPU.CPUArch.getDefault()); + VMTemplateVO result = templateDao.findLatestTemplateByName("test", Hypervisor.HypervisorType.KVM, + CPU.CPUArch.getDefault()); assertNotNull("Expected a non-null template", result); assertEquals("Expected the returned template to be the first element", expectedTemplate, result); } @@ -85,7 +86,8 @@ public void testFindLatestTemplateByName_ReturnsTemplate() { public void testFindLatestTemplateByName_ReturnsNullWhenNoTemplateFound() { List<VMTemplateVO> emptyList = Collections.emptyList(); doReturn(emptyList).when(templateDao).listBy(any(SearchCriteria.class), any(Filter.class)); - VMTemplateVO result = templateDao.findLatestTemplateByName("test", CPU.CPUArch.getDefault()); + VMTemplateVO result = templateDao.findLatestTemplateByName("test", Hypervisor.HypervisorType.VMware, + CPU.CPUArch.getDefault()); assertNull("Expected null when no templates are found", result); } @@ -94,7 +96,8 @@ public void testFindLatestTemplateByName_NullArch() { VMTemplateVO expectedTemplate = new VMTemplateVO(); List<VMTemplateVO> returnedList = Collections.singletonList(expectedTemplate); doReturn(returnedList).when(templateDao).listBy(any(SearchCriteria.class), any(Filter.class)); - VMTemplateVO result = templateDao.findLatestTemplateByName("test", null); + VMTemplateVO result = templateDao.findLatestTemplateByName("test", Hypervisor.HypervisorType.XenServer, + null); assertNotNull("Expected a non-null template even if arch is null", result); assertEquals("Expected the returned template to be the first element", expectedTemplate, result); } @@ -337,4 +340,82 @@ public void testFindSystemVMReadyTemplate() { VMTemplateVO readyTemplate = templateDao.findSystemVMReadyTemplate(zoneId, Hypervisor.HypervisorType.KVM, CPU.CPUArch.arm64.getType()); Assert.assertEquals(CPU.CPUArch.arm64, readyTemplate.getArch()); } + + @Test + public void findActiveSystemTemplateByHypervisorArchAndUrlPath_ReturnsTemplate() { + VMTemplateVO expectedTemplate = mock(VMTemplateVO.class); + SearchBuilder<VMTemplateVO> sb = mock(SearchBuilder.class); + when(sb.entity()).thenReturn(expectedTemplate); + SearchCriteria<VMTemplateVO> sc = mock(SearchCriteria.class); + when(sb.create()).thenReturn(sc); + when(templateDao.createSearchBuilder()).thenReturn(sb); + List<VMTemplateVO> templates = Collections.singletonList(expectedTemplate); + doReturn(templates).when(templateDao).listBy(any(SearchCriteria.class), any(Filter.class)); + + VMTemplateVO result = templateDao.findActiveSystemTemplateByHypervisorArchAndUrlPath( + Hypervisor.HypervisorType.KVM, CPU.CPUArch.amd64, "testPath"); + + assertNotNull(result); + assertEquals(expectedTemplate, result); + } + + @Test
+ public void findActiveSystemTemplateByHypervisorArchAndUrlPath_ReturnsNullWhenNoTemplatesFound() { + VMTemplateVO template = mock(VMTemplateVO.class); + SearchBuilder<VMTemplateVO> sb = mock(SearchBuilder.class); + when(sb.entity()).thenReturn(template); + SearchCriteria<VMTemplateVO> sc = mock(SearchCriteria.class); + when(sb.create()).thenReturn(sc); + when(templateDao.createSearchBuilder()).thenReturn(sb); + doReturn(Collections.emptyList()).when(templateDao).listBy(any(SearchCriteria.class), any(Filter.class)); + + VMTemplateVO result = templateDao.findActiveSystemTemplateByHypervisorArchAndUrlPath( + Hypervisor.HypervisorType.KVM, CPU.CPUArch.amd64, "testPath"); + + assertNull(result); + } + + @Test + public void findActiveSystemTemplateByHypervisorArchAndUrlPath_NullHypervisor() { + VMTemplateVO expectedTemplate = mock(VMTemplateVO.class); + SearchBuilder<VMTemplateVO> sb = mock(SearchBuilder.class); + when(sb.entity()).thenReturn(expectedTemplate); + SearchCriteria<VMTemplateVO> sc = mock(SearchCriteria.class); + when(sb.create()).thenReturn(sc); + when(templateDao.createSearchBuilder()).thenReturn(sb); + List<VMTemplateVO> templates = Collections.singletonList(expectedTemplate); + doReturn(templates).when(templateDao).listBy(any(SearchCriteria.class), any(Filter.class)); + + VMTemplateVO result = templateDao.findActiveSystemTemplateByHypervisorArchAndUrlPath( + null, CPU.CPUArch.amd64, "testPath"); + + assertNotNull(result); + assertEquals(expectedTemplate, result); + } + + @Test + public void findActiveSystemTemplateByHypervisorArchAndUrlPath_NullArch() { + VMTemplateVO expectedTemplate = mock(VMTemplateVO.class); + SearchBuilder<VMTemplateVO> sb = mock(SearchBuilder.class); + when(sb.entity()).thenReturn(expectedTemplate); + SearchCriteria<VMTemplateVO> sc = mock(SearchCriteria.class); + when(sb.create()).thenReturn(sc); + when(templateDao.createSearchBuilder()).thenReturn(sb); + List<VMTemplateVO> templates = Collections.singletonList(expectedTemplate); + doReturn(templates).when(templateDao).listBy(any(SearchCriteria.class), any(Filter.class)); + + VMTemplateVO result = templateDao.findActiveSystemTemplateByHypervisorArchAndUrlPath( + Hypervisor.HypervisorType.KVM, null, "testPath"); + + assertNotNull(result); + assertEquals(expectedTemplate, result); + } + + @Test + public void findActiveSystemTemplateByHypervisorArchAndUrlPath_EmptyUrlPathSuffix() { + VMTemplateVO result = templateDao.findActiveSystemTemplateByHypervisorArchAndUrlPath( + Hypervisor.HypervisorType.KVM, CPU.CPUArch.amd64, ""); + + assertNull(result); + } } diff --git a/engine/schema/src/test/java/com/cloud/upgrade/SystemVmTemplateRegistrationTest.java b/engine/schema/src/test/java/com/cloud/upgrade/SystemVmTemplateRegistrationTest.java index b943f48ad36e..8ddb13e706fa 100644 --- a/engine/schema/src/test/java/com/cloud/upgrade/SystemVmTemplateRegistrationTest.java +++ b/engine/schema/src/test/java/com/cloud/upgrade/SystemVmTemplateRegistrationTest.java @@ -17,6 +17,7 @@ package com.cloud.upgrade; +import static com.cloud.upgrade.SystemVmTemplateRegistration.DEFAULT_SYSTEM_VM_GUEST_OS_NAME; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; @@ -25,24 +26,41 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.File; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; +import java.util.Date; import java.util.List; +import java.util.Map; +import org.apache.cloudstack.engine.subsystem.api.storage.ObjectInDataStoreStateMachine; +import org.apache.cloudstack.framework.config.dao.ConfigurationDao; +import org.apache.cloudstack.framework.config.impl.ConfigurationVO; +import org.apache.cloudstack.storage.datastore.db.ImageStoreDao; +import org.apache.cloudstack.storage.datastore.db.ImageStoreDetailsDao; +import org.apache.cloudstack.storage.datastore.db.ImageStoreVO; +import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreDao; import org.apache.cloudstack.storage.datastore.db.TemplateDataStoreVO; import org.apache.cloudstack.utils.security.DigestHelper; import org.apache.commons.lang3.StringUtils; +import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.InjectMocks; @@ -53,15 +71,27 @@ import org.mockito.junit.MockitoJUnitRunner; import com.cloud.cpu.CPU; +import com.cloud.dc.DataCenterVO; import com.cloud.dc.dao.ClusterDao; +import com.cloud.dc.dao.DataCenterDao; +import com.cloud.dc.dao.DataCenterDetailsDao; import com.cloud.hypervisor.Hypervisor; +import com.cloud.storage.DataStoreRole; +import com.cloud.storage.GuestOSVO; +import com.cloud.storage.Storage; +import com.cloud.storage.VMTemplateStorageResourceAssoc; import com.cloud.storage.VMTemplateVO; +import com.cloud.storage.VMTemplateZoneVO; +import com.cloud.storage.dao.GuestOSDao; import com.cloud.storage.dao.VMTemplateDao; +import com.cloud.storage.dao.VMTemplateZoneDao; +import com.cloud.template.VirtualMachineTemplate; import com.cloud.utils.HttpUtils; import com.cloud.utils.Pair; import com.cloud.utils.UriUtils; import com.cloud.utils.exception.CloudRuntimeException; import com.cloud.utils.script.Script; +import com.cloud.vm.dao.VMInstanceDao; @RunWith(MockitoJUnitRunner.class) public class SystemVmTemplateRegistrationTest { @@ -72,10 +102,42 @@ public class SystemVmTemplateRegistrationTest { @Mock VMTemplateDao vmTemplateDao; + @Mock + GuestOSDao guestOSDao; + + @Mock + TemplateDataStoreDao templateDataStoreDao; + + @Mock + ConfigurationDao configurationDao; + + @Mock + DataCenterDao dataCenterDao; + + @Mock + DataCenterDetailsDao dataCenterDetailsDao; + + @Mock + VMTemplateZoneDao vmTemplateZoneDao; + + @Mock + ImageStoreDao imageStoreDao; + + @Mock + ImageStoreDetailsDao imageStoreDetailsDao; + + @Mock + VMInstanceDao vmInstanceDao; + @Spy @InjectMocks SystemVmTemplateRegistration systemVmTemplateRegistration = new SystemVmTemplateRegistration(); + @Before + public void setup() { + SystemVmTemplateRegistration.METADATA_TEMPLATE_LIST.clear(); + } + private void setupMetadataFile(MockedStatic mockedStatic, String content) { try { String location = "metadata.ini"; @@ -98,7 +160,7 @@ public void test_parseMetadataFile_noFile() { setupMetadataFile(mockedStatic, null); CloudRuntimeException exception = assertThrows(CloudRuntimeException.class, SystemVmTemplateRegistration::parseMetadataFile); - assertTrue(exception.getMessage().contains("Failed to parse 
systemVM Template metadata file")); + assertTrue(exception.getMessage().contains("Failed to parse system VM Template metadata file")); } } @@ -109,7 +171,7 @@ public void test_parseMetadataFile_invalidContent() { setupMetadataFile(mockedStatic, "abc"); CloudRuntimeException exception = assertThrows(CloudRuntimeException.class, SystemVmTemplateRegistration::parseMetadataFile); - assertTrue(exception.getMessage().contains("Failed to parse systemVM Template metadata file")); + assertTrue(exception.getMessage().contains("Failed to parse system VM Template metadata file")); } } @@ -141,21 +203,25 @@ public void test_parseMetadataFile_success() { String version = SystemVmTemplateRegistration.parseMetadataFile(); assertEquals("x.y.z.0", version); } - assertNull(SystemVmTemplateRegistration.NewTemplateMap.get("xenserver")); + assertNull(SystemVmTemplateRegistration.getMetadataTemplateDetails(Hypervisor.HypervisorType.XenServer, + CPU.CPUArch.getDefault())); SystemVmTemplateRegistration.MetadataTemplateDetails templateDetails = - SystemVmTemplateRegistration.NewTemplateMap.get("kvm-x86_64"); + SystemVmTemplateRegistration.getMetadataTemplateDetails(Hypervisor.HypervisorType.KVM, + CPU.CPUArch.amd64); assertNotNull(templateDetails); assertEquals(CPU.CPUArch.amd64, templateDetails.getArch()); assertEquals(Hypervisor.HypervisorType.KVM, templateDetails.getHypervisorType()); templateDetails = - SystemVmTemplateRegistration.NewTemplateMap.get("kvm-aarch64"); + SystemVmTemplateRegistration.getMetadataTemplateDetails(Hypervisor.HypervisorType.KVM, + CPU.CPUArch.arm64); assertNotNull(templateDetails); assertEquals(CPU.CPUArch.arm64, templateDetails.getArch()); assertEquals(Hypervisor.HypervisorType.KVM, templateDetails.getHypervisorType()); templateDetails = - SystemVmTemplateRegistration.NewTemplateMap.get("vmware"); + SystemVmTemplateRegistration.getMetadataTemplateDetails(Hypervisor.HypervisorType.VMware, + CPU.CPUArch.getDefault()); assertNotNull(templateDetails); - assertNull(templateDetails.getArch()); + assertEquals(CPU.CPUArch.getDefault(), templateDetails.getArch()); assertEquals(Hypervisor.HypervisorType.VMware, templateDetails.getHypervisorType()); } @@ -193,11 +259,10 @@ public void testValidateTemplateFile_fileNotFound() { SystemVmTemplateRegistration.MetadataTemplateDetails details = new SystemVmTemplateRegistration.MetadataTemplateDetails(Hypervisor.HypervisorType.KVM, "name", "file", "url", "checksum", CPU.CPUArch.amd64, "guestos"); - SystemVmTemplateRegistration.NewTemplateMap.put(SystemVmTemplateRegistration.getHypervisorArchKey( - details.getHypervisorType(), details.getArch()), details); + SystemVmTemplateRegistration.METADATA_TEMPLATE_LIST.add(details); doReturn(null).when(systemVmTemplateRegistration).getTemplateFile(details); try { - systemVmTemplateRegistration.validateTemplateFileForHypervisorAndArch(details.getHypervisorType(), + systemVmTemplateRegistration.getValidatedTemplateDetailsForHypervisorAndArch(details.getHypervisorType(), details.getArch()); fail("Expected CloudRuntimeException due to missing template file"); } catch (CloudRuntimeException e) { @@ -211,12 +276,11 @@ public void testValidateTemplateFile_checksumMismatch() { new SystemVmTemplateRegistration.MetadataTemplateDetails(Hypervisor.HypervisorType.KVM, "name", "file", "url", "checksum", CPU.CPUArch.amd64, "guestos"); File dummyFile = new File("dummy.txt"); - SystemVmTemplateRegistration.NewTemplateMap.put(SystemVmTemplateRegistration.getHypervisorArchKey( - details.getHypervisorType(), details.getArch()), 
details); + SystemVmTemplateRegistration.METADATA_TEMPLATE_LIST.add(details); doReturn(dummyFile).when(systemVmTemplateRegistration).getTemplateFile(details); - doReturn(true).when(systemVmTemplateRegistration).isTemplateFileChecksumDifferent(details, dummyFile); - try { - systemVmTemplateRegistration.validateTemplateFileForHypervisorAndArch(details.getHypervisorType(), + try (MockedStatic digestMock = Mockito.mockStatic(DigestHelper.class)) { + digestMock.when(() -> DigestHelper.calculateChecksum(dummyFile)).thenReturn("differentChecksum"); + systemVmTemplateRegistration.getValidatedTemplateDetailsForHypervisorAndArch(details.getHypervisorType(), details.getArch()); fail("Expected CloudRuntimeException due to checksum failure"); } catch (CloudRuntimeException e) { @@ -230,42 +294,55 @@ public void testValidateTemplateFile_success() { new SystemVmTemplateRegistration.MetadataTemplateDetails(Hypervisor.HypervisorType.KVM, "name", "file", "url", "checksum", CPU.CPUArch.amd64, "guestos"); File dummyFile = new File("dummy.txt"); - SystemVmTemplateRegistration.NewTemplateMap.put(SystemVmTemplateRegistration.getHypervisorArchKey( - details.getHypervisorType(), details.getArch()), details); + SystemVmTemplateRegistration.METADATA_TEMPLATE_LIST.add(details); doReturn(dummyFile).when(systemVmTemplateRegistration).getTemplateFile(details); - doReturn(false).when(systemVmTemplateRegistration).isTemplateFileChecksumDifferent(details, dummyFile); - systemVmTemplateRegistration.validateTemplateFileForHypervisorAndArch(details.getHypervisorType(), - details.getArch()); + try (MockedStatic digestMock = Mockito.mockStatic(DigestHelper.class)) { + digestMock.when(() -> DigestHelper.calculateChecksum(dummyFile)).thenReturn("checksum"); + systemVmTemplateRegistration.getValidatedTemplateDetailsForHypervisorAndArch(details.getHypervisorType(), + details.getArch()); + } } @Test - public void testValidateAndRegisterTemplate() { + public void testValidateAndAddExistingTemplateToStore() { + long zoneId = 1L; Hypervisor.HypervisorType hypervisor = Hypervisor.HypervisorType.KVM; - String name = "TestTemplate"; - Long storeId = 123L; VMTemplateVO templateVO = new VMTemplateVO(); - templateVO.setArch(CPU.CPUArch.x86); + templateVO.setHypervisorType(hypervisor); + templateVO.setArch(CPU.CPUArch.getDefault()); TemplateDataStoreVO templateDataStoreVO = new TemplateDataStoreVO(); + Long storeId = 123L; String filePath = "/dummy/path"; - doNothing().when(systemVmTemplateRegistration).validateTemplateFileForHypervisorAndArch(hypervisor, templateVO.getArch()); - doNothing().when(systemVmTemplateRegistration).registerTemplate(hypervisor, name, storeId, templateVO, templateDataStoreVO, filePath); - systemVmTemplateRegistration.validateAndRegisterTemplate(hypervisor, name, storeId, templateVO, templateDataStoreVO, filePath); - verify(systemVmTemplateRegistration).validateTemplateFileForHypervisorAndArch(eq(hypervisor), eq(templateVO.getArch())); - verify(systemVmTemplateRegistration).registerTemplate(eq(hypervisor), eq(name), eq(storeId), eq(templateVO), eq(templateDataStoreVO), eq(filePath)); + SystemVmTemplateRegistration.MetadataTemplateDetails details = + mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); + doReturn(details).when(systemVmTemplateRegistration) + .getValidatedTemplateDetailsForHypervisorAndArch(hypervisor, templateVO.getArch()); + doNothing().when(systemVmTemplateRegistration).addExistingTemplateToStore(templateVO, details, + templateDataStoreVO, zoneId, storeId, filePath); + 
systemVmTemplateRegistration.validateAndAddTemplateToStore(templateVO, templateDataStoreVO, zoneId, storeId, + filePath); + verify(systemVmTemplateRegistration) + .getValidatedTemplateDetailsForHypervisorAndArch(hypervisor, templateVO.getArch()); + verify(systemVmTemplateRegistration).addExistingTemplateToStore(templateVO, details, templateDataStoreVO, + zoneId, storeId, filePath); } @Test - public void testValidateAndRegisterTemplateForNonExistingEntries() { + public void testValidateAndAddExistingTemplateToStoreForNonExistingEntries() { + long zoneId = 1L; Hypervisor.HypervisorType hypervisor = Hypervisor.HypervisorType.KVM; CPU.CPUArch arch = CPU.CPUArch.amd64; String name = "TestTemplateNonExisting"; - Pair storeUrlAndId = new Pair<>("nfs://dummy", 456L); + long storeId = 123L; String filePath = "/dummy/path/nonexisting"; - doNothing().when(systemVmTemplateRegistration).validateTemplateFileForHypervisorAndArch(hypervisor, arch); - doNothing().when(systemVmTemplateRegistration).registerTemplateForNonExistingEntries(hypervisor, arch, name, storeUrlAndId, filePath); - systemVmTemplateRegistration.validateAndRegisterTemplateForNonExistingEntries(hypervisor, arch, name, storeUrlAndId, filePath); - verify(systemVmTemplateRegistration).validateTemplateFileForHypervisorAndArch(eq(hypervisor), eq(arch)); - verify(systemVmTemplateRegistration).registerTemplateForNonExistingEntries(eq(hypervisor), eq(arch), eq(name), eq(storeUrlAndId), eq(filePath)); + SystemVmTemplateRegistration.MetadataTemplateDetails details = + mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); + doReturn(details).when(systemVmTemplateRegistration) + .getValidatedTemplateDetailsForHypervisorAndArch(hypervisor, arch); + doNothing().when(systemVmTemplateRegistration).registerNewTemplate(name, details, zoneId, storeId, filePath); + systemVmTemplateRegistration.validateAndRegisterNewTemplate(hypervisor, arch, name, zoneId, storeId, filePath); + verify(systemVmTemplateRegistration).getValidatedTemplateDetailsForHypervisorAndArch(hypervisor, arch); + verify(systemVmTemplateRegistration).registerNewTemplate(name, details, zoneId, storeId, filePath); } @Test @@ -316,86 +393,73 @@ public void testGetTemplateFile_fileDoesNotExist_downloadSucceeds() { } @Test - public void testIsTemplateFileChecksumDifferent_noMismatch() { - SystemVmTemplateRegistration.MetadataTemplateDetails details = - Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); - when(details.getChecksum()).thenReturn("dummyChecksum"); - File file = new File("dummy.txt"); - try (MockedStatic digestMock = Mockito.mockStatic(DigestHelper.class)) { - digestMock.when(() -> DigestHelper.calculateChecksum(file)).thenReturn("dummyChecksum"); - boolean result = systemVmTemplateRegistration.isTemplateFileChecksumDifferent(details, file); - assertFalse(result); - } - } - - @Test - public void testIsTemplateFileChecksumDifferent_mismatch() { - SystemVmTemplateRegistration.MetadataTemplateDetails details = - Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); - when(details.getChecksum()).thenReturn("expectedChecksum"); - File file = new File("dummy.txt"); - try (MockedStatic digestMock = Mockito.mockStatic(DigestHelper.class)) { - digestMock.when(() -> DigestHelper.calculateChecksum(file)).thenReturn("actualChecksum"); - boolean result = systemVmTemplateRegistration.isTemplateFileChecksumDifferent(details, file); - assertTrue(result); - } - } - - @Test(expected = CloudRuntimeException.class) - public void 
testValidateTemplates_metadataTemplateFailure() { + public void testValidateTemplates_metadataTemplateSkip() { + Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.VMware; + CPU.CPUArch arch = CPU.CPUArch.arm64; List> list = new ArrayList<>(); - list.add(new Pair<>(Hypervisor.HypervisorType.KVM, CPU.CPUArch.amd64)); + list.add(new Pair<>(hypervisorType, arch)); systemVmTemplateRegistration.validateTemplates(list); + verify(systemVmTemplateRegistration, never()).getValidatedTemplateDetailsForHypervisorAndArch(hypervisorType, + arch); } @Test(expected = CloudRuntimeException.class) public void testValidateTemplates_fileFailure() { + Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.KVM; + CPU.CPUArch arch = CPU.CPUArch.amd64; List> list = new ArrayList<>(); - list.add(new Pair<>(Hypervisor.HypervisorType.KVM, CPU.CPUArch.amd64)); - + list.add(new Pair<>(hypervisorType, arch)); SystemVmTemplateRegistration.MetadataTemplateDetails details = Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); - SystemVmTemplateRegistration.NewTemplateMap.put(SystemVmTemplateRegistration.getHypervisorArchKey( - Hypervisor.HypervisorType.KVM, CPU.CPUArch.amd64), details); + when(details.getHypervisorType()).thenReturn(hypervisorType); + when(details.getArch()).thenReturn(arch); File mockFile = Mockito.mock(File.class); + when(details.isFileChecksumDifferent(mockFile)).thenReturn(true); + SystemVmTemplateRegistration.METADATA_TEMPLATE_LIST.add(details); doReturn(mockFile).when(systemVmTemplateRegistration).getTemplateFile(details); - doReturn(true).when(systemVmTemplateRegistration).isTemplateFileChecksumDifferent(details, mockFile); systemVmTemplateRegistration.validateTemplates(list); } + @Test(expected = CloudRuntimeException.class) public void testValidateTemplates_downloadableFileNotFound() { CPU.CPUArch arch = SystemVmTemplateRegistration.DOWNLOADABLE_TEMPLATE_ARCH_TYPES.get(0); List> list = new ArrayList<>(); - list.add(new Pair<>(Hypervisor.HypervisorType.KVM, arch)); + Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.KVM; + list.add(new Pair<>(hypervisorType, arch)); SystemVmTemplateRegistration.MetadataTemplateDetails details = Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); - SystemVmTemplateRegistration.NewTemplateMap.put(SystemVmTemplateRegistration.getHypervisorArchKey( - Hypervisor.HypervisorType.KVM, arch), details); + when(details.getHypervisorType()).thenReturn(hypervisorType); + when(details.getArch()).thenReturn(arch); + SystemVmTemplateRegistration.METADATA_TEMPLATE_LIST.add(details); doReturn(null).when(systemVmTemplateRegistration).getTemplateFile(details); systemVmTemplateRegistration.validateTemplates(list); } @Test public void testValidateTemplates_success() { + Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.KVM; + CPU.CPUArch arch = CPU.CPUArch.amd64; List> list = new ArrayList<>(); - list.add(new Pair<>(Hypervisor.HypervisorType.KVM, CPU.CPUArch.amd64)); - + list.add(new Pair<>(hypervisorType, arch)); SystemVmTemplateRegistration.MetadataTemplateDetails details = Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); - SystemVmTemplateRegistration.NewTemplateMap.put(SystemVmTemplateRegistration.getHypervisorArchKey( - Hypervisor.HypervisorType.KVM, CPU.CPUArch.amd64), details); + when(details.getHypervisorType()).thenReturn(hypervisorType); + when(details.getArch()).thenReturn(arch); File mockFile = Mockito.mock(File.class); + 
when(details.isFileChecksumDifferent(mockFile)).thenReturn(false); + SystemVmTemplateRegistration.METADATA_TEMPLATE_LIST.add(details); doReturn(mockFile).when(systemVmTemplateRegistration).getTemplateFile(details); - doReturn(false).when(systemVmTemplateRegistration).isTemplateFileChecksumDifferent(details, mockFile); systemVmTemplateRegistration.validateTemplates(list); } @Test - public void testRegisterTemplatesForZone() { + public void testAddExistingTemplatesForZoneToStore() { long zoneId = 1L; String filePath = "dummyFilePath"; String nfsVersion = "nfs3"; Pair storeUrlAndId = new Pair<>("nfs://dummy", 100L); + String name = "existing"; + String url = "url"; doReturn(storeUrlAndId).when(systemVmTemplateRegistration).getNfsStoreInZone(zoneId); doReturn(nfsVersion).when(systemVmTemplateRegistration).getNfsVersion(storeUrlAndId.second()); try (MockedStatic mockedStatic = Mockito.mockStatic( @@ -407,21 +471,1167 @@ public void testRegisterTemplatesForZone() { doReturn(hypervisorArchList).when(clusterDao).listDistinctHypervisorsAndArchExcludingExternalType(zoneId); SystemVmTemplateRegistration.MetadataTemplateDetails details = Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); - String name = "existing"; - Mockito.when(details.getArch()).thenReturn(CPU.CPUArch.getDefault()); - Mockito.when(details.getName()).thenReturn(name); + when(details.getArch()).thenReturn(CPU.CPUArch.getDefault()); + when(details.getName()).thenReturn(name); + when(details.getUrl()).thenReturn(url); mockedStatic.when(() -> SystemVmTemplateRegistration.getMetadataTemplateDetails(Mockito.any(), Mockito.any())).thenReturn(details); - when(systemVmTemplateRegistration.getRegisteredTemplate(name, arch)) - .thenReturn(null); - doNothing().when(systemVmTemplateRegistration).registerTemplateForNonExistingEntries( - hypervisorType, arch, - name, storeUrlAndId, filePath); + doNothing().when(systemVmTemplateRegistration).registerNewTemplate(name, details, zoneId, + storeUrlAndId.second(), filePath); systemVmTemplateRegistration.registerTemplatesForZone(zoneId, filePath); mockedStatic.verify(() -> SystemVmTemplateRegistration.mountStore(storeUrlAndId.first(), filePath, nfsVersion)); - verify(systemVmTemplateRegistration).registerTemplateForNonExistingEntries(hypervisorType, - arch, name, storeUrlAndId, filePath); + verify(systemVmTemplateRegistration).registerNewTemplate(name, details, zoneId, + storeUrlAndId.second(), filePath); + } + } + + @Test + public void updateOrRegisterSystemVmTemplate_UpdatesRegisteredTemplate() { + SystemVmTemplateRegistration.MetadataTemplateDetails templateDetails = Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); + when(templateDetails.getName()).thenReturn("templateName"); + when(templateDetails.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM); + when(templateDetails.getArch()).thenReturn(CPU.CPUArch.amd64); + when(templateDetails.getUrl()).thenReturn("http://example.com/template"); + VMTemplateVO registeredTemplate = Mockito.mock(VMTemplateVO.class); + when(registeredTemplate.getId()).thenReturn(1L); + doReturn(registeredTemplate).when(systemVmTemplateRegistration).getRegisteredTemplate( + "templateName", Hypervisor.HypervisorType.KVM, CPU.CPUArch.amd64, "http://example.com/template"); + doNothing().when(systemVmTemplateRegistration).updateRegisteredTemplateDetails(1L, templateDetails, + null); + + boolean result = systemVmTemplateRegistration.updateOrRegisterSystemVmTemplate(templateDetails, + new ArrayList<>()); + + 
assertFalse(result); + verify(systemVmTemplateRegistration).updateRegisteredTemplateDetails(1L, templateDetails, null); + } + + @Test + public void updateOrRegisterSystemVmTemplate_SkipsUnusedHypervisorArch() { + SystemVmTemplateRegistration.MetadataTemplateDetails templateDetails = Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); + when(templateDetails.getName()).thenReturn("templateName"); + when(templateDetails.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM); + when(templateDetails.getArch()).thenReturn(CPU.CPUArch.amd64); + when(templateDetails.getUrl()).thenReturn("http://example.com/template"); + doReturn(null).when(systemVmTemplateRegistration).getRegisteredTemplate( + "templateName", Hypervisor.HypervisorType.KVM, CPU.CPUArch.amd64, "http://example.com/template"); + doReturn(null).when(vmTemplateDao).findLatestTemplateByTypeAndHypervisorAndArch( + Hypervisor.HypervisorType.KVM, CPU.CPUArch.amd64, Storage.TemplateType.SYSTEM); + + boolean result = systemVmTemplateRegistration.updateOrRegisterSystemVmTemplate(templateDetails, new ArrayList<>()); + + assertFalse(result); + verify(systemVmTemplateRegistration, never()).registerTemplates(anyList()); + } + + @Test + public void updateOrRegisterSystemVmTemplate_RegistersNewTemplate() { + SystemVmTemplateRegistration.MetadataTemplateDetails templateDetails = Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); + when(templateDetails.getName()).thenReturn("templateName"); + when(templateDetails.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM); + when(templateDetails.getArch()).thenReturn(CPU.CPUArch.amd64); + when(templateDetails.getUrl()).thenReturn("http://example.com/template"); + doReturn(null).when(systemVmTemplateRegistration).getRegisteredTemplate( + "templateName", Hypervisor.HypervisorType.KVM, CPU.CPUArch.amd64, "http://example.com/template"); + List> hypervisorsInUse = new ArrayList<>(); + hypervisorsInUse.add(new Pair<>(Hypervisor.HypervisorType.KVM, CPU.CPUArch.amd64)); + doNothing().when(systemVmTemplateRegistration).registerTemplates(hypervisorsInUse); + + boolean result = systemVmTemplateRegistration.updateOrRegisterSystemVmTemplate(templateDetails, hypervisorsInUse); + + assertTrue(result); + verify(systemVmTemplateRegistration).registerTemplates(eq(hypervisorsInUse)); + } + + @Test + public void updateOrRegisterSystemVmTemplate_ThrowsExceptionOnRegistrationFailure() { + SystemVmTemplateRegistration.MetadataTemplateDetails templateDetails = Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); + when(templateDetails.getName()).thenReturn("templateName"); + when(templateDetails.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM); + when(templateDetails.getArch()).thenReturn(CPU.CPUArch.amd64); + when(templateDetails.getUrl()).thenReturn("http://example.com/template"); + doReturn(null).when(systemVmTemplateRegistration).getRegisteredTemplate( + "templateName", Hypervisor.HypervisorType.KVM, CPU.CPUArch.amd64, "http://example.com/template"); + List> hypervisorsInUse = new ArrayList<>(); + hypervisorsInUse.add(new Pair<>(Hypervisor.HypervisorType.KVM, CPU.CPUArch.amd64)); + doThrow(new CloudRuntimeException("Registration failed")).when(systemVmTemplateRegistration).registerTemplates(hypervisorsInUse); + + CloudRuntimeException exception = assertThrows(CloudRuntimeException.class, + () -> systemVmTemplateRegistration.updateOrRegisterSystemVmTemplate(templateDetails, hypervisorsInUse)); + + 
assertTrue(exception.getMessage().contains("Failed to register")); + } + + @Test + public void updateRegisteredTemplateDetails_UpdatesTemplateSuccessfully() { + Long templateId = 1L; + Long zoneId = 2L; + SystemVmTemplateRegistration.MetadataTemplateDetails templateDetails = + Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); + VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class); + GuestOSVO guestOS = Mockito.mock(GuestOSVO.class); + + when(templateDetails.getGuestOs()).thenReturn("Debian"); + when(templateDetails.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM); + when(templateDetails.getName()).thenReturn("templateName"); + when(vmTemplateDao.findById(templateId)).thenReturn(templateVO); + when(guestOSDao.findOneByDisplayName("Debian")).thenReturn(guestOS); + when(guestOS.getId()).thenReturn(10L); + when(vmTemplateDao.update(templateVO.getId(), templateVO)).thenReturn(true); + doNothing().when(systemVmTemplateRegistration).updateSystemVMEntries(templateId, Hypervisor.HypervisorType.KVM); + doNothing().when(systemVmTemplateRegistration).updateConfigurationParams(Hypervisor.HypervisorType.KVM, + "templateName", zoneId); + + systemVmTemplateRegistration.updateRegisteredTemplateDetails(templateId, templateDetails, zoneId); + + verify(templateVO).setTemplateType(Storage.TemplateType.SYSTEM); + verify(templateVO).setGuestOSId(10); + verify(vmTemplateDao).update(templateVO.getId(), templateVO); + verify(systemVmTemplateRegistration).updateSystemVMEntries(templateId, Hypervisor.HypervisorType.KVM); + verify(systemVmTemplateRegistration).updateConfigurationParams(Hypervisor.HypervisorType.KVM, + "templateName", zoneId); + } + + @Test + public void updateRegisteredTemplateDetails_ThrowsExceptionWhenUpdateFails() { + Long templateId = 1L; + Long zoneId = 2L; + SystemVmTemplateRegistration.MetadataTemplateDetails templateDetails = + Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); + VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class); + + when(templateDetails.getGuestOs()).thenReturn("Debian"); + when(vmTemplateDao.findById(templateId)).thenReturn(templateVO); + when(vmTemplateDao.update(templateVO.getId(), templateVO)).thenReturn(false); + + CloudRuntimeException exception = assertThrows(CloudRuntimeException.class, + () -> systemVmTemplateRegistration.updateRegisteredTemplateDetails(templateId, templateDetails, zoneId)); + + assertTrue(exception.getMessage().contains("Exception while updating template with id")); + verify(systemVmTemplateRegistration, never()).updateSystemVMEntries(anyLong(), any()); + verify(systemVmTemplateRegistration, never()).updateConfigurationParams(any(), any(), any()); + } + + @Test + public void updateRegisteredTemplateDetails_SkipsGuestOSUpdateWhenNotFound() { + Long templateId = 1L; + Long zoneId = 2L; + SystemVmTemplateRegistration.MetadataTemplateDetails templateDetails = + Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); + VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class); + + when(templateDetails.getGuestOs()).thenReturn("NonExistentOS"); + when(templateDetails.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM); + when(templateDetails.getName()).thenReturn("templateName"); + when(vmTemplateDao.findById(templateId)).thenReturn(templateVO); + when(guestOSDao.findOneByDisplayName("NonExistentOS")).thenReturn(null); + when(vmTemplateDao.update(templateVO.getId(), templateVO)).thenReturn(true); + 
doNothing().when(systemVmTemplateRegistration).updateSystemVMEntries(templateId, Hypervisor.HypervisorType.KVM); + doNothing().when(systemVmTemplateRegistration).updateConfigurationParams(Hypervisor.HypervisorType.KVM, + "templateName", zoneId); + + systemVmTemplateRegistration.updateRegisteredTemplateDetails(templateId, templateDetails, zoneId); + + verify(templateVO, never()).setGuestOSId(anyInt()); + verify(vmTemplateDao).update(templateVO.getId(), templateVO); + verify(systemVmTemplateRegistration).updateSystemVMEntries(templateId, Hypervisor.HypervisorType.KVM); + verify(systemVmTemplateRegistration).updateConfigurationParams(Hypervisor.HypervisorType.KVM, + "templateName", zoneId); + } + + @Test + public void registerTemplatesForZone_SuccessfullyRegistersNewTemplate() { + long zoneId = 1L; + String storeMountPath = "/mnt/nfs"; + Pair storeUrlAndId = new Pair<>("nfs://dummy", 100L); + String nfsVersion = "nfs3"; + List> hypervisorArchList = new ArrayList<>(); + Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.KVM; + CPU.CPUArch arch = CPU.CPUArch.amd64; + hypervisorArchList.add(new Pair<>(hypervisorType, arch)); + SystemVmTemplateRegistration.MetadataTemplateDetails templateDetails = + Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); + when(templateDetails.getHypervisorType()).thenReturn(hypervisorType); + when(templateDetails.getArch()).thenReturn(arch); + String name = "TestTemplate"; + String url = "http://example.com/template"; + when(templateDetails.getName()).thenReturn(name); + when(templateDetails.getUrl()).thenReturn(url); + doReturn(storeUrlAndId).when(systemVmTemplateRegistration).getNfsStoreInZone(zoneId); + doReturn(nfsVersion).when(systemVmTemplateRegistration).getNfsVersion(storeUrlAndId.second()); + doReturn(null).when(systemVmTemplateRegistration).getRegisteredTemplate( + name, hypervisorType, arch, url); + doNothing().when(systemVmTemplateRegistration).registerNewTemplate( + name, templateDetails, zoneId, storeUrlAndId.second(), storeMountPath); + doReturn(hypervisorArchList).when(clusterDao).listDistinctHypervisorsAndArchExcludingExternalType(zoneId); + try (MockedStatic mockedStatic = + Mockito.mockStatic(SystemVmTemplateRegistration.class)) { + mockedStatic.when(() -> SystemVmTemplateRegistration.getMetadataTemplateDetails( + hypervisorType, arch)).thenReturn(templateDetails); + + systemVmTemplateRegistration.registerTemplatesForZone(zoneId, storeMountPath); + + mockedStatic.verify(() -> SystemVmTemplateRegistration.mountStore( + eq(storeUrlAndId.first()), eq(storeMountPath), eq(nfsVersion)), times(1)); + verify(systemVmTemplateRegistration).registerNewTemplate( + templateDetails.getName(), templateDetails, zoneId, storeUrlAndId.second(), storeMountPath); + } + } + + @Test + public void registerTemplatesForZone_SkipsWhenTemplateDetailsNotFound() { + long zoneId = 1L; + String storeMountPath = "/mnt/nfs"; + Pair storeUrlAndId = new Pair<>("nfs://dummy", 100L); + String nfsVersion = "nfs3"; + List> hypervisorArchList = new ArrayList<>(); + Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.KVM; + CPU.CPUArch arch = CPU.CPUArch.amd64; + hypervisorArchList.add(new Pair<>(hypervisorType, arch)); + doReturn(storeUrlAndId).when(systemVmTemplateRegistration).getNfsStoreInZone(zoneId); + doReturn(nfsVersion).when(systemVmTemplateRegistration).getNfsVersion(storeUrlAndId.second()); + doReturn(hypervisorArchList).when(clusterDao).listDistinctHypervisorsAndArchExcludingExternalType(zoneId); + + try (MockedStatic 
mockedStatic = + Mockito.mockStatic(SystemVmTemplateRegistration.class)) { + mockedStatic.when(() -> SystemVmTemplateRegistration.getMetadataTemplateDetails( + hypervisorType, arch)).thenReturn(null); + + systemVmTemplateRegistration.registerTemplatesForZone(zoneId, storeMountPath); + + mockedStatic.verify(() -> SystemVmTemplateRegistration.mountStore( + eq(storeUrlAndId.first()), eq(storeMountPath), eq(nfsVersion)), times(1)); + verify(systemVmTemplateRegistration, never()).registerNewTemplate(any(), any(), anyLong(), anyLong(), any()); + } + } + + @Test + public void registerTemplatesForZone_AddsExistingTemplateToStore() { + long zoneId = 1L; + String storeMountPath = "/mnt/nfs"; + Pair storeUrlAndId = new Pair<>("nfs://dummy", 100L); + String nfsVersion = "nfs3"; + List> hypervisorArchList = new ArrayList<>(); + Hypervisor.HypervisorType hypervisorType = Hypervisor.HypervisorType.KVM; + CPU.CPUArch arch = CPU.CPUArch.amd64; + hypervisorArchList.add(new Pair<>(hypervisorType, arch)); + SystemVmTemplateRegistration.MetadataTemplateDetails templateDetails = + Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); + when(templateDetails.getHypervisorType()).thenReturn(hypervisorType); + when(templateDetails.getArch()).thenReturn(arch); + String name = "TestTemplate"; + String url = "http://example.com/template"; + when(templateDetails.getName()).thenReturn(name); + when(templateDetails.getUrl()).thenReturn(url); + VMTemplateVO templateVO = Mockito.mock(VMTemplateVO.class); + long templateId = 100L; + when(templateVO.getId()).thenReturn(templateId); + TemplateDataStoreVO templateDataStoreVO = Mockito.mock(TemplateDataStoreVO.class); + String installPath = "/template/install/path"; + when(templateDataStoreVO.getInstallPath()).thenReturn(installPath); + + doReturn(storeUrlAndId).when(systemVmTemplateRegistration).getNfsStoreInZone(zoneId); + doReturn(nfsVersion).when(systemVmTemplateRegistration).getNfsVersion(storeUrlAndId.second()); + doReturn(hypervisorArchList).when(clusterDao).listDistinctHypervisorsAndArchExcludingExternalType(zoneId); + doReturn(templateVO).when(systemVmTemplateRegistration).getRegisteredTemplate(name, hypervisorType, arch, url); + doReturn(templateDataStoreVO).when(templateDataStoreDao) + .findByStoreTemplate(storeUrlAndId.second(), templateId); + doReturn(false).when(systemVmTemplateRegistration).validateIfSeeded( + templateDataStoreVO, storeUrlAndId.first(), installPath, nfsVersion); + doNothing().when(systemVmTemplateRegistration).addExistingTemplateToStore( + templateVO, templateDetails, templateDataStoreVO, zoneId, storeUrlAndId.second(), storeMountPath); + doNothing().when(systemVmTemplateRegistration).updateRegisteredTemplateDetails( + templateId, templateDetails, zoneId); + + try (MockedStatic mockedStatic = + Mockito.mockStatic(SystemVmTemplateRegistration.class)) { + mockedStatic.when(() -> SystemVmTemplateRegistration.getMetadataTemplateDetails( + hypervisorType, arch)).thenReturn(templateDetails); + + systemVmTemplateRegistration.registerTemplatesForZone(zoneId, storeMountPath); + + verify(systemVmTemplateRegistration).addExistingTemplateToStore( + templateVO, templateDetails, templateDataStoreVO, zoneId, storeUrlAndId.second(), storeMountPath); + verify(systemVmTemplateRegistration).updateRegisteredTemplateDetails(templateId, templateDetails, zoneId); + } + } + + @Test + public void performTemplateRegistrationOperations_CreatesNewTemplateWhenNotExists() { + String name = "TestTemplate"; + String url = "http://example.com/template"; + 
String checksum = "abc123"; + Storage.ImageFormat format = Storage.ImageFormat.QCOW2; + long guestOsId = 1L; + Long storeId = 100L; + Long templateId = null; + String filePath = "/mnt/nfs"; + TemplateDataStoreVO templateDataStoreVO = null; + SystemVmTemplateRegistration.MetadataTemplateDetails templateDetails = Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); + + when(templateDetails.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM); + when(templateDetails.getArch()).thenReturn(CPU.CPUArch.amd64); + doReturn(new VMTemplateVO()).when(vmTemplateDao).persist(any()); + doNothing().when(systemVmTemplateRegistration).createCrossZonesTemplateZoneRefEntries(anyLong()); + doNothing().when(systemVmTemplateRegistration).createTemplateStoreRefEntry(any()); + doNothing().when(systemVmTemplateRegistration).setupTemplateOnStore(anyString(), any(), anyString()); + doNothing().when(systemVmTemplateRegistration).readTemplateProperties(anyString(), any()); + doNothing().when(systemVmTemplateRegistration).updateTemplateDetails(any()); + + Long result = systemVmTemplateRegistration.performTemplateRegistrationOperations(name, templateDetails, url, checksum, format, guestOsId, storeId, templateId, filePath, templateDataStoreVO); + + assertNotNull(result); + verify(vmTemplateDao).persist(any()); + verify(systemVmTemplateRegistration).createCrossZonesTemplateZoneRefEntries(anyLong()); + verify(systemVmTemplateRegistration).createTemplateStoreRefEntry(any()); + verify(systemVmTemplateRegistration).setupTemplateOnStore(anyString(), any(), anyString()); + verify(systemVmTemplateRegistration).updateTemplateDetails(any()); + } + + @Test + public void performTemplateRegistrationOperations_UpdatesExistingTemplate() { + String name = "TestTemplate"; + String url = "http://example.com/template"; + String checksum = "abc123"; + Storage.ImageFormat format = Storage.ImageFormat.QCOW2; + long guestOsId = 1L; + Long storeId = 100L; + Long templateId = 1L; + String filePath = "/mnt/nfs"; + TemplateDataStoreVO templateDataStoreVO = Mockito.mock(TemplateDataStoreVO.class); + SystemVmTemplateRegistration.MetadataTemplateDetails templateDetails = Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); + + when(templateDetails.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM); + when(templateDetails.getArch()).thenReturn(CPU.CPUArch.amd64); + doNothing().when(systemVmTemplateRegistration).createCrossZonesTemplateZoneRefEntries(anyLong()); + doNothing().when(systemVmTemplateRegistration).setupTemplateOnStore(anyString(), any(), anyString()); + doNothing().when(systemVmTemplateRegistration).readTemplateProperties(anyString(), any()); + doNothing().when(systemVmTemplateRegistration).updateTemplateDetails(any()); + + Long result = systemVmTemplateRegistration.performTemplateRegistrationOperations(name, templateDetails, url, checksum, format, guestOsId, storeId, templateId, filePath, templateDataStoreVO); + + assertNotNull(result); + assertEquals(templateId, result); + verify(vmTemplateDao, never()).persist(any()); + verify(systemVmTemplateRegistration).createCrossZonesTemplateZoneRefEntries(anyLong()); + verify(systemVmTemplateRegistration, never()).createTemplateStoreRefEntry(any()); + verify(systemVmTemplateRegistration).setupTemplateOnStore(anyString(), any(), anyString()); + verify(systemVmTemplateRegistration).updateTemplateDetails(any()); + } + + @Test + public void performTemplateRegistrationOperations_ThrowsExceptionWhenTemplateCreationFails() { + String name = 
"TestTemplate"; + String url = "http://example.com/template"; + String checksum = "abc123"; + Storage.ImageFormat format = Storage.ImageFormat.QCOW2; + long guestOsId = 1L; + Long storeId = 100L; + Long templateId = null; + String filePath = "/mnt/nfs"; + TemplateDataStoreVO templateDataStoreVO = null; + SystemVmTemplateRegistration.MetadataTemplateDetails templateDetails = Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); + + when(templateDetails.getHypervisorType()).thenReturn(Hypervisor.HypervisorType.KVM); + when(templateDetails.getArch()).thenReturn(CPU.CPUArch.amd64); + doReturn(null).when(vmTemplateDao).persist(any()); + + assertThrows(CloudRuntimeException.class, () -> { + systemVmTemplateRegistration.performTemplateRegistrationOperations(name, templateDetails, url, checksum, format, guestOsId, storeId, templateId, filePath, templateDataStoreVO); + }); + + verify(vmTemplateDao).persist(any()); + verify(systemVmTemplateRegistration, never()).createCrossZonesTemplateZoneRefEntries(anyLong()); + verify(systemVmTemplateRegistration, never()).createTemplateStoreRefEntry(any()); + verify(systemVmTemplateRegistration, never()).setupTemplateOnStore(anyString(), any(), anyString()); + verify(systemVmTemplateRegistration, never()).updateTemplateDetails(any()); + } + + @Test + public void setupTemplateOnStore_ThrowsExceptionWhenScriptNotFound() { + String templateName = "templateName"; + String destTempFolder = "/tmp/folder"; + SystemVmTemplateRegistration.MetadataTemplateDetails templateDetails = + Mockito.mock(SystemVmTemplateRegistration.MetadataTemplateDetails.class); + + try (MockedStatic + + diff --git a/ui/src/components/view/stats/ResourceStatsInfo.vue b/ui/src/components/view/stats/ResourceStatsInfo.vue index 6898141bdfc5..104283436626 100644 --- a/ui/src/components/view/stats/ResourceStatsInfo.vue +++ b/ui/src/components/view/stats/ResourceStatsInfo.vue @@ -1,17 +1,17 @@ // Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file +// or more contributor license agreements. See the NOTICE file // distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file +// regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at +// with the License. You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. diff --git a/ui/src/components/view/stats/ResourceStatsLineChart.vue b/ui/src/components/view/stats/ResourceStatsLineChart.vue index fa15ea398a5e..399e77bebd46 100644 --- a/ui/src/components/view/stats/ResourceStatsLineChart.vue +++ b/ui/src/components/view/stats/ResourceStatsLineChart.vue @@ -1,17 +1,17 @@ // Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file +// or more contributor license agreements. 
See the NOTICE file // distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file +// regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at +// with the License. You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. diff --git a/ui/src/config/section/compute.js b/ui/src/config/section/compute.js index 63d0e365db92..a03693e351d8 100644 --- a/ui/src/config/section/compute.js +++ b/ui/src/config/section/compute.js @@ -18,6 +18,7 @@ import { shallowRef, defineAsyncComponent } from 'vue' import store from '@/store' import { isZoneCreated } from '@/utils/zone' +import kubernetesIcon from '@/assets/icons/kubernetes.svg?inline' export default { name: 'compute', @@ -551,7 +552,7 @@ export default { { name: 'kubernetes', title: 'label.kubernetes', - icon: ['fa-solid', 'fa-dharmachakra'], + icon: kubernetesIcon, docHelp: 'plugins/cloudstack-kubernetes-service.html', searchFilters: ['name', 'domainid', 'account', 'state'], permission: ['listKubernetesClusters'], diff --git a/ui/src/config/section/domain.js b/ui/src/config/section/domain.js index fbe20ef8891b..706cbf805cfa 100644 --- a/ui/src/config/section/domain.js +++ b/ui/src/config/section/domain.js @@ -144,7 +144,7 @@ export default { docHelp: 'adminguide/accounts.html#using-an-ldap-server-for-user-authentication', listView: true, dataView: true, - args: ['type', 'domainid', 'name', 'accounttype', 'admin'], + args: ['type', 'domainid', 'ldapdomain', 'accounttype', 'admin'], mapping: { type: { options: ['GROUP', 'OU'] @@ -157,6 +157,20 @@ export default { } } }, + { + api: 'unlinkDomainFromLdap', + icon: 'ArrowsAltOutlined', + label: 'label.unlink.domain.from.ldap', + docHelp: 'adminguide/accounts.html#using-an-ldap-server-for-user-authentication', + listView: true, + dataView: true, + args: ['domainid'], + mapping: { + domainid: { + value: (record) => { return record.id } + } + } + }, { api: 'deleteDomain', icon: 'delete-outlined', diff --git a/ui/src/config/section/image.js b/ui/src/config/section/image.js index d93e27c3f222..bae4a40c9a8d 100644 --- a/ui/src/config/section/image.js +++ b/ui/src/config/section/image.js @@ -18,6 +18,7 @@ import { shallowRef, defineAsyncComponent } from 'vue' import store from '@/store' import { isZoneCreated } from '@/utils/zone' +import kubernetesIcon from '@/assets/icons/kubernetes.svg?inline' export default { name: 'image', @@ -367,7 +368,7 @@ export default { { name: 'kubernetesiso', title: 'label.kubernetes.isos', - icon: ['fa-solid', 'fa-dharmachakra'], + icon: kubernetesIcon, docHelp: 'plugins/cloudstack-kubernetes-service.html#kubernetes-supported-versions', permission: ['listKubernetesSupportedVersions'], searchFilters: ['zoneid', 'minimumsemanticversion', 'arch'], diff --git a/ui/src/config/section/infra/systemVms.js b/ui/src/config/section/infra/systemVms.js index 6e135ccdd36a..4a5879b17626 100644 --- 
a/ui/src/config/section/infra/systemVms.js +++ b/ui/src/config/section/infra/systemVms.js @@ -26,7 +26,7 @@ export default { permission: ['listSystemVms'], searchFilters: ['name', 'zoneid', 'podid', 'hostid', 'systemvmtype', 'storageid', 'arch'], columns: ['name', 'state', 'agentstate', 'systemvmtype', 'publicip', 'privateip', 'linklocalip', 'version', 'hostname', 'arch', 'zonename'], - details: ['name', 'id', 'agentstate', 'systemvmtype', 'publicip', 'privateip', 'linklocalip', 'gateway', 'hostname', 'arch', 'version', 'zonename', 'created', 'activeviewersessions', 'isdynamicallyscalable', 'hostcontrolstate'], + details: ['name', 'id', 'agentstate', 'systemvmtype', 'publicip', 'privateip', 'linklocalip', 'gateway', 'hostname', 'arch', 'version', 'zonename', 'created', 'activeviewersessions', 'isdynamicallyscalable', 'hostcontrolstate', 'storageip'], resourceType: 'SystemVm', filters: () => { const filters = ['starting', 'running', 'stopping', 'stopped', 'destroyed', 'expunging', 'migrating', 'error', 'unknown', 'shutdown'] diff --git a/ui/src/config/section/network.js b/ui/src/config/section/network.js index acc7424c9f0c..33b39d271726 100644 --- a/ui/src/config/section/network.js +++ b/ui/src/config/section/network.js @@ -1266,15 +1266,11 @@ export default { { api: 'updateVpnCustomerGateway', icon: 'edit-outlined', - label: 'label.edit', + label: 'label.update.vpn.customer.gateway', docHelp: 'adminguide/networking_and_traffic.html#updating-and-removing-a-vpn-customer-gateway', dataView: true, - args: ['name', 'gateway', 'cidrlist', 'ipsecpsk', 'ikepolicy', 'ikelifetime', 'ikeversion', 'esppolicy', 'esplifetime', 'dpd', 'splitconnections', 'forceencap'], - mapping: { - ikeversion: { - options: ['ike', 'ikev1', 'ikev2'] - } - } + popup: true, + component: shallowRef(defineAsyncComponent(() => import('@/views/network/UpdateVpnCustomerGateway.vue'))) }, { api: 'deleteVpnCustomerGateway', diff --git a/ui/src/config/section/offering.js b/ui/src/config/section/offering.js index 4a32619b8c2f..bc95772d6f7a 100644 --- a/ui/src/config/section/offering.js +++ b/ui/src/config/section/offering.js @@ -340,9 +340,9 @@ export default { icon: 'cloud-upload-outlined', docHelp: 'adminguide/virtual_machines.html#backup-offerings', permission: ['listBackupOfferings'], - searchFilters: ['zoneid'], - columns: ['name', 'description', 'zonename'], - details: ['name', 'id', 'description', 'externalid', 'zone', 'allowuserdrivenbackups', 'created'], + searchFilters: ['zoneid', 'domainid'], + columns: ['name', 'description', 'domain', 'zonename'], + details: ['name', 'id', 'description', 'externalid', 'domain', 'zone', 'allowuserdrivenbackups', 'created'], related: [{ name: 'vm', title: 'label.instances', diff --git a/ui/src/config/section/tools.js b/ui/src/config/section/tools.js index a07228ca87b4..5b7f4b9af325 100644 --- a/ui/src/config/section/tools.js +++ b/ui/src/config/section/tools.js @@ -116,6 +116,10 @@ export default { name: 'details', component: shallowRef(defineAsyncComponent(() => import('@/components/view/DetailsTab.vue'))) }, + { + name: 'filters', + component: shallowRef(defineAsyncComponent(() => import('@/components/view/WebhookFiltersTab.vue'))) + }, { name: 'recent.deliveries', component: shallowRef(defineAsyncComponent(() => import('@/components/view/WebhookDeliveriesTab.vue'))) diff --git a/ui/src/utils/plugins.js b/ui/src/utils/plugins.js index 306eb9d1f594..9125e7744496 100644 --- a/ui/src/utils/plugins.js +++ b/ui/src/utils/plugins.js @@ -558,7 +558,8 @@ export const cpuArchitectureUtilPlugin 
= { app.config.globalProperties.$fetchCpuArchitectureTypes = function () { const architectures = [ { id: 'x86_64', name: 'Intel/AMD 64 bits (x86_64)' }, - { id: 'aarch64', name: 'ARM 64 bits (aarch64)' } + { id: 'aarch64', name: 'ARM 64 bits (aarch64)' }, + { id: 's390x', name: 'IBM Z 64 bits (s390x)' } ] return architectures.map(item => ({ ...item, description: item.name })) } diff --git a/ui/src/utils/renderIcon.js b/ui/src/utils/renderIcon.js index 8d982fd4adc0..b92fba28548b 100644 --- a/ui/src/utils/renderIcon.js +++ b/ui/src/utils/renderIcon.js @@ -45,8 +45,9 @@ export default { const props = Object.assign({}, this.props) props.width = '1em' props.height = '1em' - props.class = 'custom-icon' - + if (!this.$attrs.style) { + props.class = 'custom-icon' + } return h('span', { role: 'img', class: 'anticon' }, [ h(this.svgIcon, { ...props }, this.event) ]) diff --git a/ui/src/views/compute/AttachIso.vue b/ui/src/views/compute/AttachIso.vue index aafed017a213..60694cb8f57b 100644 --- a/ui/src/views/compute/AttachIso.vue +++ b/ui/src/views/compute/AttachIso.vue @@ -85,10 +85,10 @@ export default { }) }, fetchData () { - const isoFiters = ['featured', 'community', 'selfexecutable'] + const isoFilters = ['featured', 'community', 'selfexecutable'] this.loading = true const promises = [] - isoFiters.forEach((filter) => { + isoFilters.forEach((filter) => { promises.push(this.fetchIsos(filter)) }) Promise.all(promises).then(() => { diff --git a/ui/src/views/compute/CreateAutoScaleVmGroup.vue b/ui/src/views/compute/CreateAutoScaleVmGroup.vue index 362c9b7b4063..9e8ebb8a4d59 100644 --- a/ui/src/views/compute/CreateAutoScaleVmGroup.vue +++ b/ui/src/views/compute/CreateAutoScaleVmGroup.vue @@ -3172,12 +3172,12 @@ export default { configuration.cpunumber = 0 configuration.cpuspeed = 0 configuration.memory = 0 - for (var harwareItem of configuration.hardwareItems) { - if (harwareItem.resourceType === 'Processor') { - configuration.cpunumber = harwareItem.virtualQuantity - configuration.cpuspeed = harwareItem.reservation - } else if (harwareItem.resourceType === 'Memory') { - configuration.memory = harwareItem.virtualQuantity + for (var hardwareItem of configuration.hardwareItems) { + if (hardwareItem.resourceType === 'Processor') { + configuration.cpunumber = hardwareItem.virtualQuantity + configuration.cpuspeed = hardwareItem.reservation + } else if (hardwareItem.resourceType === 'Memory') { + configuration.memory = hardwareItem.virtualQuantity } } configurations.push(configuration) diff --git a/ui/src/views/compute/CreateKubernetesCluster.vue b/ui/src/views/compute/CreateKubernetesCluster.vue index 68c7a35e3e5e..1799933bf2ef 100644 --- a/ui/src/views/compute/CreateKubernetesCluster.vue +++ b/ui/src/views/compute/CreateKubernetesCluster.vue @@ -930,7 +930,7 @@ export default { description: values.name, loadingMessage: `${this.$t('label.kubernetes.cluster.create')} ${values.name} ${this.$t('label.in.progress')}`, catchMessage: this.$t('error.fetching.async.job.result'), - successMessage: this.$t('message.success.create.kubernetes.cluter') + ' ' + values.name + successMessage: this.$t('message.success.create.kubernetes.cluster') + ' ' + values.name }) this.closeAction() }).catch(error => { diff --git a/ui/src/views/compute/DeployVM.vue b/ui/src/views/compute/DeployVM.vue index 1966203e7dd7..26176e760051 100644 --- a/ui/src/views/compute/DeployVM.vue +++ b/ui/src/views/compute/DeployVM.vue @@ -1920,7 +1920,7 @@ export default { } this.fetchBootTypes() this.fetchBootModes() - this.fetchInstaceGroups() + 
this.fetchInstanceGroups() this.fetchIoPolicyTypes() nextTick().then(() => { ['name', 'keyboard', 'boottype', 'bootmode', 'userdata', 'iothreadsenabled', 'iodriverpolicy', 'nicmultiqueuenumber', 'nicpackedvirtqueues'].forEach(this.fillValue) @@ -1976,7 +1976,7 @@ export default { { id: 'storage_specific', description: 'storage_specific' } ] }, - fetchInstaceGroups () { + fetchInstanceGroups () { this.options.instanceGroups = [] getAPI('listInstanceGroups', { account: this.$store.getters.project?.id ? null : this.$store.getters.userInfo.account, @@ -3226,12 +3226,12 @@ export default { configuration.cpunumber = 0 configuration.cpuspeed = 0 configuration.memory = 0 - for (var harwareItem of configuration.hardwareItems) { - if (harwareItem.resourceType === 'Processor') { - configuration.cpunumber = harwareItem.virtualQuantity - configuration.cpuspeed = harwareItem.reservation - } else if (harwareItem.resourceType === 'Memory') { - configuration.memory = harwareItem.virtualQuantity + for (var hardwareItem of configuration.hardwareItems) { + if (hardwareItem.resourceType === 'Processor') { + configuration.cpunumber = hardwareItem.virtualQuantity + configuration.cpuspeed = hardwareItem.reservation + } else if (hardwareItem.resourceType === 'Memory') { + configuration.memory = hardwareItem.virtualQuantity } } configurations.push(configuration) diff --git a/ui/src/views/compute/DeployVnfAppliance.vue b/ui/src/views/compute/DeployVnfAppliance.vue index 36fdd86dbf6d..be154221675e 100644 --- a/ui/src/views/compute/DeployVnfAppliance.vue +++ b/ui/src/views/compute/DeployVnfAppliance.vue @@ -1768,7 +1768,7 @@ export default { } this.fetchBootTypes() this.fetchBootModes() - this.fetchInstaceGroups() + this.fetchInstanceGroups() this.fetchIoPolicyTypes() nextTick().then(() => { ['name', 'keyboard', 'boottype', 'bootmode', 'userdata', 'iothreadsenabled', 'iodriverpolicy', 'nicmultiqueuenumber', 'nicpackedvirtqueues'].forEach(this.fillValue) @@ -1823,7 +1823,7 @@ export default { { id: 'storage_specific', description: 'storage_specific' } ] }, - fetchInstaceGroups () { + fetchInstanceGroups () { this.options.instanceGroups = [] getAPI('listInstanceGroups', { account: this.$store.getters.userInfo.account, @@ -2777,12 +2777,12 @@ export default { configuration.cpunumber = 0 configuration.cpuspeed = 0 configuration.memory = 0 - for (var harwareItem of configuration.hardwareItems) { - if (harwareItem.resourceType === 'Processor') { - configuration.cpunumber = harwareItem.virtualQuantity - configuration.cpuspeed = harwareItem.reservation - } else if (harwareItem.resourceType === 'Memory') { - configuration.memory = harwareItem.virtualQuantity + for (var hardwareItem of configuration.hardwareItems) { + if (hardwareItem.resourceType === 'Processor') { + configuration.cpunumber = hardwareItem.virtualQuantity + configuration.cpuspeed = hardwareItem.reservation + } else if (hardwareItem.resourceType === 'Memory') { + configuration.memory = hardwareItem.virtualQuantity } } configurations.push(configuration) diff --git a/ui/src/views/compute/EditVM.vue b/ui/src/views/compute/EditVM.vue index 0763303b24a4..19055afde973 100644 --- a/ui/src/views/compute/EditVM.vue +++ b/ui/src/views/compute/EditVM.vue @@ -255,7 +255,7 @@ export default { this.fetchZoneDetails() this.fetchSecurityGroups() this.fetchOsTypes() - this.fetchInstaceGroups() + this.fetchInstanceGroups() this.fetchServiceOfferingData() this.fetchTemplateData() this.fetchUserData() @@ -335,7 +335,7 @@ export default { this.$notifyError(error) }).finally(() => { 
this.osTypes.loading = false }) }, - fetchInstaceGroups () { + fetchInstanceGroups () { this.groups.loading = true this.groups.opts = [] const params = { diff --git a/ui/src/views/compute/ReinstallVm.vue b/ui/src/views/compute/ReinstallVm.vue index 5238252e7958..4cb6317008f8 100644 --- a/ui/src/views/compute/ReinstallVm.vue +++ b/ui/src/views/compute/ReinstallVm.vue @@ -1,17 +1,17 @@ // Licensed to the Apache Software Foundation (ASF) under one -// or more contributor license agreements. See the NOTICE file +// or more contributor license agreements. See the NOTICE file // distributed with this work for additional information -// regarding copyright ownership. The ASF licenses this file +// regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance -// with the License. You may obtain a copy of the License at +// with the License. You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the +// KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. diff --git a/ui/src/views/compute/wizard/OsBasedImageSelection.vue b/ui/src/views/compute/wizard/OsBasedImageSelection.vue index 6a52eea207b0..680922752f74 100644 --- a/ui/src/views/compute/wizard/OsBasedImageSelection.vue +++ b/ui/src/views/compute/wizard/OsBasedImageSelection.vue @@ -47,7 +47,7 @@ @change="handleGuestOsCategoryChange">
- - {{ extenstionBasePath }} + + {{ extensionBasePath }} @@ -207,8 +208,9 @@ export default { this.fetchTimeZone = debounce(this.fetchTimeZone, 800) return { loading: false, - domain: { loading: false }, + domain: { id: null, loading: false }, domainsList: [], + dom: null, roleLoading: false, roles: [], timeZoneLoading: false, @@ -227,14 +229,35 @@ export default { computed: { samlAllowed () { return 'authorizeSamlSso' in this.$store.getters.apis + }, + selectedDomain () { + return this.domainsList.find(domain => domain.id === this.form.domainid) + }, + isNonRootDomain () { + if (!this.selectedDomain) return false + return this.selectedDomain.level > 0 && this.selectedDomain.path !== 'ROOT' + } + }, + watch: { + 'form.domainid': { + handler (newDomainId, oldDomainId) { + if (newDomainId && this.roles.length > 0) { + this.$nextTick(() => { + this.setDefaultRole() + }) + } + }, + immediate: false } }, methods: { initForm () { + var domId = this.$route.query.domainid || this.$store.getters.userInfo.domainid this.formRef = ref() this.form = reactive({ - domainid: this.$store.getters.userInfo.domainid + domainid: domId }) + this.domain.id = domId this.rules = reactive({ roleid: [{ required: true, message: this.$t('message.error.select') }], username: [{ required: true, message: this.$t('message.error.required.input') }], @@ -263,9 +286,36 @@ export default { isDomainAdmin () { return this.$store.getters.userInfo.roletype === 'DomainAdmin' }, + isAdmin () { + return this.$store.getters.userInfo.roletype === 'Admin' + }, isValidValueForKey (obj, key) { return key in obj && obj[key] != null }, + onDomainChange (newDomainId) { + if (newDomainId && this.roles.length > 0) { + this.$nextTick(() => { + this.setDefaultRole() + }) + } + }, + setDefaultRole () { + if (this.roles.length === 0) return + + let targetRoleType = null + + if (this.isAdmin()) { + targetRoleType = this.isNonRootDomain ? 'DomainAdmin' : 'Admin' + } else if (this.isDomainAdmin()) { + targetRoleType = 'User' + } + + const targetRole = targetRoleType + ? 
this.roles.find(role => role.type === targetRoleType) + : this.roles[0] + + this.form.roleid = (targetRole || this.roles[0]).id + }, async validateConfirmPassword (rule, value) { if (!value || value.length === 0) { return Promise.resolve() @@ -286,17 +336,22 @@ export default { this.loadMore('listDomains', 1, this.domain) }, loadMore (apiToCall, page, sema) { - console.log('sema.loading ' + sema.loading) - const params = {} - params.listAll = true - params.details = 'min' - params.pagesize = 100 - params.page = page + const params = { + listAll: true, + details: 'min', + pagesize: 100, + page: page + } var count getAPI(apiToCall, params).then(json => { const listDomains = json.listdomainsresponse.domain count = json.listdomainsresponse.count this.domainsList = this.domainsList.concat(listDomains) + this.dom = this.domainsList.find(domain => domain.id === this.domain.id) + + if (this.roles.length > 0) { + this.setDefaultRole() + } }).finally(() => { if (count <= this.domainsList.length) { sema.loading = false @@ -307,17 +362,13 @@ export default { }, fetchRoles () { this.roleLoading = true - const params = {} - params.state = 'enabled' + const params = { + state: 'enabled' + } + getAPI('listRoles', params).then(response => { this.roles = response.listrolesresponse.role || [] - this.form.roleid = this.roles[0].id - if (this.isDomainAdmin()) { - const userRole = this.roles.filter(role => role.type === 'User') - if (userRole.length > 0) { - this.form.roleid = userRole[0].id - } - } + this.setDefaultRole() }).finally(() => { this.roleLoading = false }) diff --git a/ui/src/views/iam/CreateRole.vue b/ui/src/views/iam/CreateRole.vue index 11cecf69efe2..eac138fae78d 100644 --- a/ui/src/views/iam/CreateRole.vue +++ b/ui/src/views/iam/CreateRole.vue @@ -202,7 +202,7 @@ export default { this.$emit('refresh-data') this.$notification.success({ message: 'Create Role', - description: 'Sucessfully created role ' + params.name + description: 'Successfully created role ' + params.name }) } this.closeAction() diff --git a/ui/src/views/iam/ImportRole.vue b/ui/src/views/iam/ImportRole.vue index c4dcf8f93b7d..7a8e17c5691b 100644 --- a/ui/src/views/iam/ImportRole.vue +++ b/ui/src/views/iam/ImportRole.vue @@ -213,7 +213,7 @@ export default { this.$emit('refresh-data') this.$notification.success({ message: 'Import Role', - description: 'Sucessfully imported role ' + params.name + description: 'Successfully imported role ' + params.name }) } this.closeAction() diff --git a/ui/src/views/infra/ClusterUpdate.vue b/ui/src/views/infra/ClusterUpdate.vue index 1af7f420e666..590abe6d1f83 100644 --- a/ui/src/views/infra/ClusterUpdate.vue +++ b/ui/src/views/infra/ClusterUpdate.vue @@ -168,6 +168,10 @@ export default { id: 'aarch64', description: 'ARM 64 bits (aarch64)' }) + typesList.push({ + id: 's390x', + description: 'IBM Z 64 bits (s390x)' + }) this.architectureTypes.opts = typesList }, fetchExtensionResourceMapDetails () { diff --git a/ui/src/views/infra/Resources.vue b/ui/src/views/infra/Resources.vue index cad1a026c0be..0055343b30df 100644 --- a/ui/src/views/infra/Resources.vue +++ b/ui/src/views/infra/Resources.vue @@ -35,7 +35,7 @@ v-if="item.tagged" class="list-item__collapse" @change="handleCollapseChange(item.type)"> - + @@ -90,7 +90,7 @@ export default { return { fetchLoading: false, resourcesList: [], - collpaseActive: {} + collapseActive: {} } }, created () { @@ -181,11 +181,11 @@ export default { } }, handleCollapseChange (type) { - if (this.collpaseActive[type]) { - this.collpaseActive[type] = null + if 
(this.collapseActive[type]) { + this.collapseActive[type] = null return } - this.collpaseActive[type] = true + this.collapseActive[type] = true var typeItems = this.resourcesList.filter(x => x.type === type) typeItems.forEach(resource => { this.animatePercentVals(resource.tagged) diff --git a/ui/src/views/infra/routers/RouterHealthCheck.vue b/ui/src/views/infra/routers/RouterHealthCheck.vue index 89a05d1fb942..0fe49c366c1e 100644 --- a/ui/src/views/infra/routers/RouterHealthCheck.vue +++ b/ui/src/views/infra/routers/RouterHealthCheck.vue @@ -22,7 +22,7 @@ banner :message="$t('message.action.router.health.checks.disabled.warning')" />
- + {{ $t('label.action.router.health.checks') }} @@ -158,7 +158,7 @@ export default { } this.checkConfigurationAndGetHealthChecks() }, - showGetHelathCheck () { + showGetHealthCheck () { this.showGetHealthChecksForm = true }, onCloseGetHealthChecksForm () { diff --git a/ui/src/views/infra/zone/ZoneWizardAddResources.vue b/ui/src/views/infra/zone/ZoneWizardAddResources.vue index b2a273f4c882..25a26dd9446c 100644 --- a/ui/src/views/infra/zone/ZoneWizardAddResources.vue +++ b/ui/src/views/infra/zone/ZoneWizardAddResources.vue @@ -865,6 +865,9 @@ export default { }, { id: 'aarch64', description: 'ARM 64 bits (aarch64)' + }, { + id: 's390x', + description: 'IBM Z 64 bits (s390x)' }], storageProviders: [], currentStep: null, diff --git a/ui/src/views/infra/zone/ZoneWizardLaunchZone.vue b/ui/src/views/infra/zone/ZoneWizardLaunchZone.vue index 006228213291..f1ef34585cac 100644 --- a/ui/src/views/infra/zone/ZoneWizardLaunchZone.vue +++ b/ui/src/views/infra/zone/ZoneWizardLaunchZone.vue @@ -1609,7 +1609,7 @@ export default { try { if (!this.stepData.stepMove.includes('createStoragePool')) { - this.stepData.primaryStorageRetunred = await this.createStoragePool(params) + this.stepData.primaryStorageReturned = await this.createStoragePool(params) this.stepData.stepMove.push('createStoragePool') } await this.stepAddSecondaryStorage() diff --git a/ui/src/views/infra/zone/ZoneWizardRegisterTemplate.vue b/ui/src/views/infra/zone/ZoneWizardRegisterTemplate.vue index 0d94fc5d3b15..40123a57d517 100644 --- a/ui/src/views/infra/zone/ZoneWizardRegisterTemplate.vue +++ b/ui/src/views/infra/zone/ZoneWizardRegisterTemplate.vue @@ -201,7 +201,7 @@ export default { if (successful.length > 0) { this.$notification.success({ message: this.$t('label.register.template'), - description: 'Succesfully registered templates: ' + successful.map(r => r.name).join(', ') + description: 'Successfully registered templates: ' + successful.map(r => r.name).join(', ') }) successful.forEach(r => { diff --git a/ui/src/views/network/CreateVpnCustomerGateway.vue b/ui/src/views/network/CreateVpnCustomerGateway.vue index f71fc4709e8d..155765a276f5 100644 --- a/ui/src/views/network/CreateVpnCustomerGateway.vue +++ b/ui/src/views/network/CreateVpnCustomerGateway.vue @@ -15,352 +15,58 @@ // specific language governing permissions and limitations // under the License. + - diff --git a/ui/src/views/network/UpdateVpnCustomerGateway.vue b/ui/src/views/network/UpdateVpnCustomerGateway.vue new file mode 100644 index 000000000000..2d54e8e031ed --- /dev/null +++ b/ui/src/views/network/UpdateVpnCustomerGateway.vue @@ -0,0 +1,129 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ + + diff --git a/ui/src/views/network/VpnCustomerGateway.vue b/ui/src/views/network/VpnCustomerGateway.vue new file mode 100644 index 000000000000..c1b1ed78ce06 --- /dev/null +++ b/ui/src/views/network/VpnCustomerGateway.vue @@ -0,0 +1,581 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + + + + diff --git a/ui/src/views/offering/AddComputeOffering.vue b/ui/src/views/offering/AddComputeOffering.vue index dc4d5b188181..465d07b8e57f 100644 --- a/ui/src/views/offering/AddComputeOffering.vue +++ b/ui/src/views/offering/AddComputeOffering.vue @@ -714,7 +714,7 @@ export default { domainLoading: false, zones: [], zoneLoading: false, - selectedDeployementPlanner: null, + selectedDeploymentPlanner: null, storagePolicies: null, storageTags: [], storageTagLoading: false, @@ -1021,9 +1021,9 @@ export default { this.qosType = val }, handleDeploymentPlannerChange (planner) { - this.selectedDeployementPlanner = planner + this.selectedDeploymentPlanner = planner this.plannerModeVisible = false - if (this.selectedDeployementPlanner === 'ImplicitDedicationPlanner') { + if (this.selectedDeploymentPlanner === 'ImplicitDedicationPlanner') { this.plannerModeVisible = isAdmin() } }, diff --git a/ui/src/views/offering/AddNetworkOffering.vue b/ui/src/views/offering/AddNetworkOffering.vue index 2b3275a11ed5..abb70abda27a 100644 --- a/ui/src/views/offering/AddNetworkOffering.vue +++ b/ui/src/views/offering/AddNetworkOffering.vue @@ -135,6 +135,7 @@ return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0 }" :placeholder="apiParams.provider.description" > + {{ }} {{ $t('label.nsx') }} {{ $t('label.netris') }} diff --git a/ui/src/views/offering/AddVpcOffering.vue b/ui/src/views/offering/AddVpcOffering.vue index d909cbdc3dca..32aa3e8d3583 100644 --- a/ui/src/views/offering/AddVpcOffering.vue +++ b/ui/src/views/offering/AddVpcOffering.vue @@ -83,6 +83,7 @@ return option.label.toLowerCase().indexOf(input.toLowerCase()) >= 0 }" :placeholder="apiParams.provider.description" > + {{ }} {{ $t('label.nsx') }} {{ $t('label.netris') }} diff --git a/ui/src/views/offering/ImportBackupOffering.vue b/ui/src/views/offering/ImportBackupOffering.vue index b8ac7d8e8e65..f680eacd4a7d 100644 --- a/ui/src/views/offering/ImportBackupOffering.vue +++ b/ui/src/views/offering/ImportBackupOffering.vue @@ -85,6 +85,33 @@ + + + + + + + + + + + {{ opt.path || opt.name || opt.description }} + + + +
{{ this.$t('label.cancel') }} {{ this.$t('label.ok') }} @@ -96,6 +123,7 @@